1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Texture sampling -- SoA.
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 * @author Brian Paul <brianp@vmware.com>
36 #include "pipe/p_defines.h"
37 #include "pipe/p_state.h"
38 #include "pipe/p_shader_tokens.h"
39 #include "util/u_debug.h"
40 #include "util/u_dump.h"
41 #include "util/u_memory.h"
42 #include "util/u_math.h"
43 #include "util/u_format.h"
44 #include "util/u_cpu_detect.h"
45 #include "util/u_format_rgb9e5.h"
46 #include "lp_bld_debug.h"
47 #include "lp_bld_type.h"
48 #include "lp_bld_const.h"
49 #include "lp_bld_conv.h"
50 #include "lp_bld_arit.h"
51 #include "lp_bld_bitarit.h"
52 #include "lp_bld_logic.h"
53 #include "lp_bld_printf.h"
54 #include "lp_bld_swizzle.h"
55 #include "lp_bld_flow.h"
56 #include "lp_bld_gather.h"
57 #include "lp_bld_format.h"
58 #include "lp_bld_sample.h"
59 #include "lp_bld_sample_aos.h"
60 #include "lp_bld_struct.h"
61 #include "lp_bld_quad.h"
62 #include "lp_bld_pack.h"
66 * Generate code to fetch a texel from a texture at int coords (x, y, z).
67 * The computation depends on whether the texture is 1D, 2D or 3D.
68 * The result, texel, will be float vectors:
69 * texel[0] = red values
70 * texel[1] = green values
71 * texel[2] = blue values
72 * texel[3] = alpha values
75 lp_build_sample_texel_soa(struct lp_build_sample_context
*bld
,
82 LLVMValueRef y_stride
,
83 LLVMValueRef z_stride
,
84 LLVMValueRef data_ptr
,
85 LLVMValueRef mipoffsets
,
86 LLVMValueRef texel_out
[4])
88 const struct lp_static_sampler_state
*static_state
= bld
->static_sampler_state
;
89 const unsigned dims
= bld
->dims
;
90 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
91 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
94 LLVMValueRef use_border
= NULL
;
96 /* use_border = x < 0 || x >= width || y < 0 || y >= height */
97 if (lp_sampler_wrap_mode_uses_border_color(static_state
->wrap_s
,
98 static_state
->min_img_filter
,
99 static_state
->mag_img_filter
)) {
101 b1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, x
, int_coord_bld
->zero
);
102 b2
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, x
, width
);
103 use_border
= LLVMBuildOr(builder
, b1
, b2
, "b1_or_b2");
107 lp_sampler_wrap_mode_uses_border_color(static_state
->wrap_t
,
108 static_state
->min_img_filter
,
109 static_state
->mag_img_filter
)) {
111 b1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, y
, int_coord_bld
->zero
);
112 b2
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, y
, height
);
114 use_border
= LLVMBuildOr(builder
, use_border
, b1
, "ub_or_b1");
115 use_border
= LLVMBuildOr(builder
, use_border
, b2
, "ub_or_b2");
118 use_border
= LLVMBuildOr(builder
, b1
, b2
, "b1_or_b2");
123 lp_sampler_wrap_mode_uses_border_color(static_state
->wrap_r
,
124 static_state
->min_img_filter
,
125 static_state
->mag_img_filter
)) {
127 b1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, z
, int_coord_bld
->zero
);
128 b2
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, z
, depth
);
130 use_border
= LLVMBuildOr(builder
, use_border
, b1
, "ub_or_b1");
131 use_border
= LLVMBuildOr(builder
, use_border
, b2
, "ub_or_b2");
134 use_border
= LLVMBuildOr(builder
, b1
, b2
, "b1_or_b2");
138 /* convert x,y,z coords to linear offset from start of texture, in bytes */
139 lp_build_sample_offset(&bld
->int_coord_bld
,
141 x
, y
, z
, y_stride
, z_stride
,
144 offset
= lp_build_add(&bld
->int_coord_bld
, offset
, mipoffsets
);
148 /* If we can sample the border color, it means that texcoords may
149 * lie outside the bounds of the texture image. We need to do
150 * something to prevent reading out of bounds and causing a segfault.
152 * Simply AND the texture coords with !use_border. This will cause
153 * coords which are out of bounds to become zero. Zero's guaranteed
154 * to be inside the texture image.
156 offset
= lp_build_andnot(&bld
->int_coord_bld
, offset
, use_border
);
159 lp_build_fetch_rgba_soa(bld
->gallivm
,
167 * Note: if we find an app which frequently samples the texture border
168 * we might want to implement a true conditional here to avoid sampling
169 * the texture whenever possible (since that's quite a bit of code).
172 * texel = border_color;
175 * texel = sample_texture(coord);
177 * As it is now, we always sample the texture, then selectively replace
178 * the texel color results with the border color.
182 /* select texel color or border color depending on use_border. */
183 const struct util_format_description
*format_desc
= bld
->format_desc
;
185 struct lp_type border_type
= bld
->texel_type
;
186 border_type
.length
= 4;
188 * Only replace channels which are actually present. The others should
189 * get optimized away eventually by sampler_view swizzle anyway but it's
192 for (chan
= 0; chan
< 4; chan
++) {
194 /* reverse-map channel... */
195 for (chan_s
= 0; chan_s
< 4; chan_s
++) {
196 if (chan_s
== format_desc
->swizzle
[chan
]) {
201 /* use the already clamped color */
202 LLVMValueRef idx
= lp_build_const_int32(bld
->gallivm
, chan
);
203 LLVMValueRef border_chan
;
205 border_chan
= lp_build_extract_broadcast(bld
->gallivm
,
208 bld
->border_color_clamped
,
210 texel_out
[chan
] = lp_build_select(&bld
->texel_bld
, use_border
,
211 border_chan
, texel_out
[chan
]);
219 * Helper to compute the mirror function for the PIPE_WRAP_MIRROR modes.
222 lp_build_coord_mirror(struct lp_build_sample_context
*bld
,
225 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
226 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
227 LLVMValueRef fract
, flr
, isOdd
;
229 lp_build_ifloor_fract(coord_bld
, coord
, &flr
, &fract
);
231 /* isOdd = flr & 1 */
232 isOdd
= LLVMBuildAnd(bld
->gallivm
->builder
, flr
, int_coord_bld
->one
, "");
234 /* make coord positive or negative depending on isOdd */
235 coord
= lp_build_set_sign(coord_bld
, fract
, isOdd
);
237 /* convert isOdd to float */
238 isOdd
= lp_build_int_to_float(coord_bld
, isOdd
);
240 /* add isOdd to coord */
241 coord
= lp_build_add(coord_bld
, coord
, isOdd
);
248 * Helper to compute the first coord and the weight for
249 * linear wrap repeat npot textures
252 lp_build_coord_repeat_npot_linear(struct lp_build_sample_context
*bld
,
253 LLVMValueRef coord_f
,
254 LLVMValueRef length_i
,
255 LLVMValueRef length_f
,
256 LLVMValueRef
*coord0_i
,
257 LLVMValueRef
*weight_f
)
259 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
260 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
261 LLVMValueRef half
= lp_build_const_vec(bld
->gallivm
, coord_bld
->type
, 0.5);
262 LLVMValueRef length_minus_one
= lp_build_sub(int_coord_bld
, length_i
,
265 /* wrap with normalized floats is just fract */
266 coord_f
= lp_build_fract(coord_bld
, coord_f
);
267 /* mul by size and subtract 0.5 */
268 coord_f
= lp_build_mul(coord_bld
, coord_f
, length_f
);
269 coord_f
= lp_build_sub(coord_bld
, coord_f
, half
);
271 * we avoided the 0.5/length division before the repeat wrap,
272 * now need to fix up edge cases with selects
274 /* convert to int, compute lerp weight */
275 lp_build_ifloor_fract(coord_bld
, coord_f
, coord0_i
, weight_f
);
276 mask
= lp_build_compare(int_coord_bld
->gallivm
, int_coord_bld
->type
,
277 PIPE_FUNC_LESS
, *coord0_i
, int_coord_bld
->zero
);
278 *coord0_i
= lp_build_select(int_coord_bld
, mask
, length_minus_one
, *coord0_i
);
283 * Build LLVM code for texture wrap mode for linear filtering.
284 * \param x0_out returns first integer texcoord
285 * \param x1_out returns second integer texcoord
286 * \param weight_out returns linear interpolation weight
289 lp_build_sample_wrap_linear(struct lp_build_sample_context
*bld
,
292 LLVMValueRef length_f
,
296 LLVMValueRef
*x0_out
,
297 LLVMValueRef
*x1_out
,
298 LLVMValueRef
*weight_out
)
300 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
301 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
302 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
303 LLVMValueRef half
= lp_build_const_vec(bld
->gallivm
, coord_bld
->type
, 0.5);
304 LLVMValueRef length_minus_one
= lp_build_sub(int_coord_bld
, length
, int_coord_bld
->one
);
305 LLVMValueRef coord0
, coord1
, weight
;
308 case PIPE_TEX_WRAP_REPEAT
:
310 /* mul by size and subtract 0.5 */
311 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
312 coord
= lp_build_sub(coord_bld
, coord
, half
);
314 offset
= lp_build_int_to_float(coord_bld
, offset
);
315 coord
= lp_build_add(coord_bld
, coord
, offset
);
317 /* convert to int, compute lerp weight */
318 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
319 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
321 coord0
= LLVMBuildAnd(builder
, coord0
, length_minus_one
, "");
322 coord1
= LLVMBuildAnd(builder
, coord1
, length_minus_one
, "");
327 offset
= lp_build_int_to_float(coord_bld
, offset
);
328 offset
= lp_build_div(coord_bld
, offset
, length_f
);
329 coord
= lp_build_add(coord_bld
, coord
, offset
);
331 lp_build_coord_repeat_npot_linear(bld
, coord
,
334 mask
= lp_build_compare(int_coord_bld
->gallivm
, int_coord_bld
->type
,
335 PIPE_FUNC_NOTEQUAL
, coord0
, length_minus_one
);
336 coord1
= LLVMBuildAnd(builder
,
337 lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
),
342 case PIPE_TEX_WRAP_CLAMP
:
343 if (bld
->static_sampler_state
->normalized_coords
) {
344 /* scale coord to length */
345 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
348 offset
= lp_build_int_to_float(coord_bld
, offset
);
349 coord
= lp_build_add(coord_bld
, coord
, offset
);
352 /* clamp to [0, length] */
353 coord
= lp_build_clamp(coord_bld
, coord
, coord_bld
->zero
, length_f
);
355 coord
= lp_build_sub(coord_bld
, coord
, half
);
357 /* convert to int, compute lerp weight */
358 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
359 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
362 case PIPE_TEX_WRAP_CLAMP_TO_EDGE
:
364 struct lp_build_context abs_coord_bld
= bld
->coord_bld
;
365 abs_coord_bld
.type
.sign
= FALSE
;
367 if (bld
->static_sampler_state
->normalized_coords
) {
368 /* mul by tex size */
369 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
372 offset
= lp_build_int_to_float(coord_bld
, offset
);
373 coord
= lp_build_add(coord_bld
, coord
, offset
);
376 /* clamp to length max */
377 coord
= lp_build_min(coord_bld
, coord
, length_f
);
379 coord
= lp_build_sub(coord_bld
, coord
, half
);
380 /* clamp to [0, length - 0.5] */
381 coord
= lp_build_max(coord_bld
, coord
, coord_bld
->zero
);
382 /* convert to int, compute lerp weight */
383 lp_build_ifloor_fract(&abs_coord_bld
, coord
, &coord0
, &weight
);
384 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
385 /* coord1 = min(coord1, length-1) */
386 coord1
= lp_build_min(int_coord_bld
, coord1
, length_minus_one
);
390 case PIPE_TEX_WRAP_CLAMP_TO_BORDER
:
391 if (bld
->static_sampler_state
->normalized_coords
) {
392 /* scale coord to length */
393 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
396 offset
= lp_build_int_to_float(coord_bld
, offset
);
397 coord
= lp_build_add(coord_bld
, coord
, offset
);
399 /* was: clamp to [-0.5, length + 0.5], then sub 0.5 */
400 /* can skip clamp (though might not work for very large coord values) */
401 coord
= lp_build_sub(coord_bld
, coord
, half
);
402 /* convert to int, compute lerp weight */
403 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
404 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
407 case PIPE_TEX_WRAP_MIRROR_REPEAT
:
408 /* compute mirror function */
409 coord
= lp_build_coord_mirror(bld
, coord
);
411 /* scale coord to length */
412 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
413 coord
= lp_build_sub(coord_bld
, coord
, half
);
415 offset
= lp_build_int_to_float(coord_bld
, offset
);
416 coord
= lp_build_add(coord_bld
, coord
, offset
);
419 /* convert to int, compute lerp weight */
420 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
421 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
423 /* coord0 = max(coord0, 0) */
424 coord0
= lp_build_max(int_coord_bld
, coord0
, int_coord_bld
->zero
);
425 /* coord1 = min(coord1, length-1) */
426 coord1
= lp_build_min(int_coord_bld
, coord1
, length_minus_one
);
429 case PIPE_TEX_WRAP_MIRROR_CLAMP
:
430 if (bld
->static_sampler_state
->normalized_coords
) {
431 /* scale coord to length */
432 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
435 offset
= lp_build_int_to_float(coord_bld
, offset
);
436 coord
= lp_build_add(coord_bld
, coord
, offset
);
438 coord
= lp_build_abs(coord_bld
, coord
);
440 /* clamp to [0, length] */
441 coord
= lp_build_min(coord_bld
, coord
, length_f
);
443 coord
= lp_build_sub(coord_bld
, coord
, half
);
445 /* convert to int, compute lerp weight */
446 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
447 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
450 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE
:
452 struct lp_build_context abs_coord_bld
= bld
->coord_bld
;
453 abs_coord_bld
.type
.sign
= FALSE
;
455 if (bld
->static_sampler_state
->normalized_coords
) {
456 /* scale coord to length */
457 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
460 offset
= lp_build_int_to_float(coord_bld
, offset
);
461 coord
= lp_build_add(coord_bld
, coord
, offset
);
463 coord
= lp_build_abs(coord_bld
, coord
);
465 /* clamp to length max */
466 coord
= lp_build_min(coord_bld
, coord
, length_f
);
468 coord
= lp_build_sub(coord_bld
, coord
, half
);
469 /* clamp to [0, length - 0.5] */
470 coord
= lp_build_max(coord_bld
, coord
, coord_bld
->zero
);
472 /* convert to int, compute lerp weight */
473 lp_build_ifloor_fract(&abs_coord_bld
, coord
, &coord0
, &weight
);
474 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
475 /* coord1 = min(coord1, length-1) */
476 coord1
= lp_build_min(int_coord_bld
, coord1
, length_minus_one
);
480 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER
:
482 if (bld
->static_sampler_state
->normalized_coords
) {
483 /* scale coord to length */
484 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
487 offset
= lp_build_int_to_float(coord_bld
, offset
);
488 coord
= lp_build_add(coord_bld
, coord
, offset
);
490 coord
= lp_build_abs(coord_bld
, coord
);
492 /* was: clamp to [-0.5, length + 0.5] then sub 0.5 */
493 /* skip clamp - always positive, and other side
494 only potentially matters for very large coords */
495 coord
= lp_build_sub(coord_bld
, coord
, half
);
497 /* convert to int, compute lerp weight */
498 lp_build_ifloor_fract(coord_bld
, coord
, &coord0
, &weight
);
499 coord1
= lp_build_add(int_coord_bld
, coord0
, int_coord_bld
->one
);
512 *weight_out
= weight
;
517 * Build LLVM code for texture wrap mode for nearest filtering.
518 * \param coord the incoming texcoord (nominally in [0,1])
519 * \param length the texture size along one dimension, as int vector
520 * \param length_f the texture size along one dimension, as float vector
521 * \param offset texel offset along one dimension (as int vector)
522 * \param is_pot if TRUE, length is a power of two
523 * \param wrap_mode one of PIPE_TEX_WRAP_x
526 lp_build_sample_wrap_nearest(struct lp_build_sample_context
*bld
,
529 LLVMValueRef length_f
,
534 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
535 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
536 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
537 LLVMValueRef length_minus_one
= lp_build_sub(int_coord_bld
, length
, int_coord_bld
->one
);
541 case PIPE_TEX_WRAP_REPEAT
:
543 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
544 icoord
= lp_build_ifloor(coord_bld
, coord
);
546 icoord
= lp_build_add(int_coord_bld
, icoord
, offset
);
548 icoord
= LLVMBuildAnd(builder
, icoord
, length_minus_one
, "");
552 offset
= lp_build_int_to_float(coord_bld
, offset
);
553 offset
= lp_build_div(coord_bld
, offset
, length_f
);
554 coord
= lp_build_add(coord_bld
, coord
, offset
);
556 /* take fraction, unnormalize */
557 coord
= lp_build_fract_safe(coord_bld
, coord
);
558 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
559 icoord
= lp_build_itrunc(coord_bld
, coord
);
563 case PIPE_TEX_WRAP_CLAMP
:
564 case PIPE_TEX_WRAP_CLAMP_TO_EDGE
:
565 if (bld
->static_sampler_state
->normalized_coords
) {
566 /* scale coord to length */
567 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
571 /* use itrunc instead since we clamp to 0 anyway */
572 icoord
= lp_build_itrunc(coord_bld
, coord
);
574 icoord
= lp_build_add(int_coord_bld
, icoord
, offset
);
577 /* clamp to [0, length - 1]. */
578 icoord
= lp_build_clamp(int_coord_bld
, icoord
, int_coord_bld
->zero
,
582 case PIPE_TEX_WRAP_CLAMP_TO_BORDER
:
583 if (bld
->static_sampler_state
->normalized_coords
) {
584 /* scale coord to length */
585 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
587 /* no clamp necessary, border masking will handle this */
588 icoord
= lp_build_ifloor(coord_bld
, coord
);
590 icoord
= lp_build_add(int_coord_bld
, icoord
, offset
);
594 case PIPE_TEX_WRAP_MIRROR_REPEAT
:
596 offset
= lp_build_int_to_float(coord_bld
, offset
);
597 offset
= lp_build_div(coord_bld
, offset
, length_f
);
598 coord
= lp_build_add(coord_bld
, coord
, offset
);
600 /* compute mirror function */
601 coord
= lp_build_coord_mirror(bld
, coord
);
603 /* scale coord to length */
604 assert(bld
->static_sampler_state
->normalized_coords
);
605 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
607 /* itrunc == ifloor here */
608 icoord
= lp_build_itrunc(coord_bld
, coord
);
610 /* clamp to [0, length - 1] */
611 icoord
= lp_build_min(int_coord_bld
, icoord
, length_minus_one
);
614 case PIPE_TEX_WRAP_MIRROR_CLAMP
:
615 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE
:
616 if (bld
->static_sampler_state
->normalized_coords
) {
617 /* scale coord to length */
618 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
621 offset
= lp_build_int_to_float(coord_bld
, offset
);
622 coord
= lp_build_add(coord_bld
, coord
, offset
);
624 coord
= lp_build_abs(coord_bld
, coord
);
626 /* itrunc == ifloor here */
627 icoord
= lp_build_itrunc(coord_bld
, coord
);
629 /* clamp to [0, length - 1] */
630 icoord
= lp_build_min(int_coord_bld
, icoord
, length_minus_one
);
633 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER
:
634 if (bld
->static_sampler_state
->normalized_coords
) {
635 /* scale coord to length */
636 coord
= lp_build_mul(coord_bld
, coord
, length_f
);
639 offset
= lp_build_int_to_float(coord_bld
, offset
);
640 coord
= lp_build_add(coord_bld
, coord
, offset
);
642 coord
= lp_build_abs(coord_bld
, coord
);
644 /* itrunc == ifloor here */
645 icoord
= lp_build_itrunc(coord_bld
, coord
);
658 * Do shadow test/comparison.
659 * \param p shadow ref value
660 * \param texel the texel to compare against
663 lp_build_sample_comparefunc(struct lp_build_sample_context
*bld
,
667 struct lp_build_context
*texel_bld
= &bld
->texel_bld
;
671 //lp_build_print_value(bld->gallivm, "shadow cmp coord", p);
672 lp_build_print_value(bld
->gallivm
, "shadow cmp texel", texel
);
675 /* result = (p FUNC texel) ? 1 : 0 */
677 * honor d3d10 floating point rules here, which state that comparisons
678 * are ordered except NOT_EQUAL which is unordered.
680 if (bld
->static_sampler_state
->compare_func
!= PIPE_FUNC_NOTEQUAL
) {
681 res
= lp_build_cmp_ordered(texel_bld
, bld
->static_sampler_state
->compare_func
,
685 res
= lp_build_cmp(texel_bld
, bld
->static_sampler_state
->compare_func
,
693 * Generate code to sample a mipmap level with nearest filtering.
694 * If sampling a cube texture, r = cube face in [0,5].
697 lp_build_sample_image_nearest(struct lp_build_sample_context
*bld
,
699 LLVMValueRef row_stride_vec
,
700 LLVMValueRef img_stride_vec
,
701 LLVMValueRef data_ptr
,
702 LLVMValueRef mipoffsets
,
703 LLVMValueRef
*coords
,
704 const LLVMValueRef
*offsets
,
705 LLVMValueRef colors_out
[4])
707 const unsigned dims
= bld
->dims
;
708 LLVMValueRef width_vec
;
709 LLVMValueRef height_vec
;
710 LLVMValueRef depth_vec
;
711 LLVMValueRef flt_size
;
712 LLVMValueRef flt_width_vec
;
713 LLVMValueRef flt_height_vec
;
714 LLVMValueRef flt_depth_vec
;
715 LLVMValueRef x
, y
= NULL
, z
= NULL
;
717 lp_build_extract_image_sizes(bld
,
721 &width_vec
, &height_vec
, &depth_vec
);
723 flt_size
= lp_build_int_to_float(&bld
->float_size_bld
, size
);
725 lp_build_extract_image_sizes(bld
,
726 &bld
->float_size_bld
,
729 &flt_width_vec
, &flt_height_vec
, &flt_depth_vec
);
732 * Compute integer texcoords.
734 x
= lp_build_sample_wrap_nearest(bld
, coords
[0], width_vec
,
735 flt_width_vec
, offsets
[0],
736 bld
->static_texture_state
->pot_width
,
737 bld
->static_sampler_state
->wrap_s
);
738 lp_build_name(x
, "tex.x.wrapped");
741 y
= lp_build_sample_wrap_nearest(bld
, coords
[1], height_vec
,
742 flt_height_vec
, offsets
[1],
743 bld
->static_texture_state
->pot_height
,
744 bld
->static_sampler_state
->wrap_t
);
745 lp_build_name(y
, "tex.y.wrapped");
748 z
= lp_build_sample_wrap_nearest(bld
, coords
[2], depth_vec
,
749 flt_depth_vec
, offsets
[2],
750 bld
->static_texture_state
->pot_depth
,
751 bld
->static_sampler_state
->wrap_r
);
752 lp_build_name(z
, "tex.z.wrapped");
755 if (bld
->static_texture_state
->target
== PIPE_TEXTURE_CUBE
||
756 bld
->static_texture_state
->target
== PIPE_TEXTURE_1D_ARRAY
||
757 bld
->static_texture_state
->target
== PIPE_TEXTURE_2D_ARRAY
) {
759 lp_build_name(z
, "tex.z.layer");
763 * Get texture colors.
765 lp_build_sample_texel_soa(bld
,
766 width_vec
, height_vec
, depth_vec
,
768 row_stride_vec
, img_stride_vec
,
769 data_ptr
, mipoffsets
, colors_out
);
771 if (bld
->static_sampler_state
->compare_mode
!= PIPE_TEX_COMPARE_NONE
) {
773 cmpval
= lp_build_sample_comparefunc(bld
, coords
[4], colors_out
[0]);
774 /* this is really just an AND of 1.0 with cmpval, but llvm is clever enough */
775 colors_out
[0] = lp_build_select(&bld
->texel_bld
, cmpval
,
776 bld
->texel_bld
.one
, bld
->texel_bld
.zero
);
777 colors_out
[1] = colors_out
[2] = colors_out
[3] = colors_out
[0];
784 * Like a lerp, but inputs are 0/~0 masks, so can simplify slightly.
787 lp_build_masklerp(struct lp_build_context
*bld
,
792 struct gallivm_state
*gallivm
= bld
->gallivm
;
793 LLVMBuilderRef builder
= gallivm
->builder
;
794 LLVMValueRef weight2
;
796 weight2
= lp_build_sub(bld
, bld
->one
, weight
);
797 weight
= LLVMBuildBitCast(builder
, weight
,
798 lp_build_int_vec_type(gallivm
, bld
->type
), "");
799 weight2
= LLVMBuildBitCast(builder
, weight2
,
800 lp_build_int_vec_type(gallivm
, bld
->type
), "");
801 weight
= LLVMBuildAnd(builder
, weight
, mask1
, "");
802 weight2
= LLVMBuildAnd(builder
, weight2
, mask0
, "");
803 weight
= LLVMBuildBitCast(builder
, weight
, bld
->vec_type
, "");
804 weight2
= LLVMBuildBitCast(builder
, weight2
, bld
->vec_type
, "");
805 return lp_build_add(bld
, weight
, weight2
);
809 * Like a 2d lerp, but inputs are 0/~0 masks, so can simplify slightly.
812 lp_build_masklerp2d(struct lp_build_context
*bld
,
813 LLVMValueRef weight0
,
814 LLVMValueRef weight1
,
820 LLVMValueRef val0
= lp_build_masklerp(bld
, weight0
, mask00
, mask01
);
821 LLVMValueRef val1
= lp_build_masklerp(bld
, weight0
, mask10
, mask11
);
822 return lp_build_lerp(bld
, weight1
, val0
, val1
, 0);
826 * Generate code to sample a mipmap level with linear filtering.
827 * If sampling a cube texture, r = cube face in [0,5].
828 * If linear_mask is present, only pixels having their mask set
829 * will receive linear filtering, the rest will use nearest.
832 lp_build_sample_image_linear(struct lp_build_sample_context
*bld
,
834 LLVMValueRef linear_mask
,
835 LLVMValueRef row_stride_vec
,
836 LLVMValueRef img_stride_vec
,
837 LLVMValueRef data_ptr
,
838 LLVMValueRef mipoffsets
,
839 LLVMValueRef
*coords
,
840 const LLVMValueRef
*offsets
,
841 LLVMValueRef colors_out
[4])
843 const unsigned dims
= bld
->dims
;
844 LLVMValueRef width_vec
;
845 LLVMValueRef height_vec
;
846 LLVMValueRef depth_vec
;
847 LLVMValueRef flt_size
;
848 LLVMValueRef flt_width_vec
;
849 LLVMValueRef flt_height_vec
;
850 LLVMValueRef flt_depth_vec
;
851 LLVMValueRef x0
, y0
= NULL
, z0
= NULL
, x1
, y1
= NULL
, z1
= NULL
;
852 LLVMValueRef s_fpart
, t_fpart
= NULL
, r_fpart
= NULL
;
853 LLVMValueRef neighbors
[2][2][4];
856 lp_build_extract_image_sizes(bld
,
860 &width_vec
, &height_vec
, &depth_vec
);
862 flt_size
= lp_build_int_to_float(&bld
->float_size_bld
, size
);
864 lp_build_extract_image_sizes(bld
,
865 &bld
->float_size_bld
,
868 &flt_width_vec
, &flt_height_vec
, &flt_depth_vec
);
871 * Compute integer texcoords.
873 lp_build_sample_wrap_linear(bld
, coords
[0], width_vec
,
874 flt_width_vec
, offsets
[0],
875 bld
->static_texture_state
->pot_width
,
876 bld
->static_sampler_state
->wrap_s
,
878 lp_build_name(x0
, "tex.x0.wrapped");
879 lp_build_name(x1
, "tex.x1.wrapped");
882 lp_build_sample_wrap_linear(bld
, coords
[1], height_vec
,
883 flt_height_vec
, offsets
[1],
884 bld
->static_texture_state
->pot_height
,
885 bld
->static_sampler_state
->wrap_t
,
887 lp_build_name(y0
, "tex.y0.wrapped");
888 lp_build_name(y1
, "tex.y1.wrapped");
891 lp_build_sample_wrap_linear(bld
, coords
[2], depth_vec
,
892 flt_depth_vec
, offsets
[2],
893 bld
->static_texture_state
->pot_depth
,
894 bld
->static_sampler_state
->wrap_r
,
896 lp_build_name(z0
, "tex.z0.wrapped");
897 lp_build_name(z1
, "tex.z1.wrapped");
900 if (bld
->static_texture_state
->target
== PIPE_TEXTURE_CUBE
||
901 bld
->static_texture_state
->target
== PIPE_TEXTURE_1D_ARRAY
||
902 bld
->static_texture_state
->target
== PIPE_TEXTURE_2D_ARRAY
) {
903 z0
= z1
= coords
[2]; /* cube face or layer */
904 lp_build_name(z0
, "tex.z0.layer");
905 lp_build_name(z1
, "tex.z1.layer");
910 * Whack filter weights into place. Whatever pixel had more weight is
911 * the one which should have been selected by nearest filtering hence
912 * just use 100% weight for it.
914 struct lp_build_context
*c_bld
= &bld
->coord_bld
;
915 LLVMValueRef w1_mask
, w1_weight
;
916 LLVMValueRef half
= lp_build_const_vec(bld
->gallivm
, c_bld
->type
, 0.5f
);
918 w1_mask
= lp_build_cmp(c_bld
, PIPE_FUNC_GREATER
, s_fpart
, half
);
919 /* this select is really just a "and" */
920 w1_weight
= lp_build_select(c_bld
, w1_mask
, c_bld
->one
, c_bld
->zero
);
921 s_fpart
= lp_build_select(c_bld
, linear_mask
, s_fpart
, w1_weight
);
923 w1_mask
= lp_build_cmp(c_bld
, PIPE_FUNC_GREATER
, t_fpart
, half
);
924 w1_weight
= lp_build_select(c_bld
, w1_mask
, c_bld
->one
, c_bld
->zero
);
925 t_fpart
= lp_build_select(c_bld
, linear_mask
, t_fpart
, w1_weight
);
927 w1_mask
= lp_build_cmp(c_bld
, PIPE_FUNC_GREATER
, r_fpart
, half
);
928 w1_weight
= lp_build_select(c_bld
, w1_mask
, c_bld
->one
, c_bld
->zero
);
929 r_fpart
= lp_build_select(c_bld
, linear_mask
, r_fpart
, w1_weight
);
935 * Get texture colors.
937 /* get x0/x1 texels */
938 lp_build_sample_texel_soa(bld
,
939 width_vec
, height_vec
, depth_vec
,
941 row_stride_vec
, img_stride_vec
,
942 data_ptr
, mipoffsets
, neighbors
[0][0]);
943 lp_build_sample_texel_soa(bld
,
944 width_vec
, height_vec
, depth_vec
,
946 row_stride_vec
, img_stride_vec
,
947 data_ptr
, mipoffsets
, neighbors
[0][1]);
950 if (bld
->static_sampler_state
->compare_mode
== PIPE_TEX_COMPARE_NONE
) {
951 /* Interpolate two samples from 1D image to produce one color */
952 for (chan
= 0; chan
< 4; chan
++) {
953 colors_out
[chan
] = lp_build_lerp(&bld
->texel_bld
, s_fpart
,
954 neighbors
[0][0][chan
],
955 neighbors
[0][1][chan
],
960 LLVMValueRef cmpval0
, cmpval1
;
961 cmpval0
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][0][0]);
962 cmpval1
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][1][0]);
963 /* simplified lerp, AND mask with weight and add */
964 colors_out
[0] = lp_build_masklerp(&bld
->texel_bld
, s_fpart
,
966 colors_out
[1] = colors_out
[2] = colors_out
[3] = colors_out
[0];
971 LLVMValueRef colors0
[4];
973 /* get x0/x1 texels at y1 */
974 lp_build_sample_texel_soa(bld
,
975 width_vec
, height_vec
, depth_vec
,
977 row_stride_vec
, img_stride_vec
,
978 data_ptr
, mipoffsets
, neighbors
[1][0]);
979 lp_build_sample_texel_soa(bld
,
980 width_vec
, height_vec
, depth_vec
,
982 row_stride_vec
, img_stride_vec
,
983 data_ptr
, mipoffsets
, neighbors
[1][1]);
985 if (bld
->static_sampler_state
->compare_mode
== PIPE_TEX_COMPARE_NONE
) {
986 /* Bilinear interpolate the four samples from the 2D image / 3D slice */
987 for (chan
= 0; chan
< 4; chan
++) {
988 colors0
[chan
] = lp_build_lerp_2d(&bld
->texel_bld
,
990 neighbors
[0][0][chan
],
991 neighbors
[0][1][chan
],
992 neighbors
[1][0][chan
],
993 neighbors
[1][1][chan
],
998 LLVMValueRef cmpval00
, cmpval01
, cmpval10
, cmpval11
;
999 cmpval00
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][0][0]);
1000 cmpval01
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][1][0]);
1001 cmpval10
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[1][0][0]);
1002 cmpval11
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[1][1][0]);
1003 colors0
[0] = lp_build_masklerp2d(&bld
->texel_bld
, s_fpart
, t_fpart
,
1004 cmpval00
, cmpval01
, cmpval10
, cmpval11
);
1005 colors0
[1] = colors0
[2] = colors0
[3] = colors0
[0];
1009 LLVMValueRef neighbors1
[2][2][4];
1010 LLVMValueRef colors1
[4];
1012 /* get x0/x1/y0/y1 texels at z1 */
1013 lp_build_sample_texel_soa(bld
,
1014 width_vec
, height_vec
, depth_vec
,
1016 row_stride_vec
, img_stride_vec
,
1017 data_ptr
, mipoffsets
, neighbors1
[0][0]);
1018 lp_build_sample_texel_soa(bld
,
1019 width_vec
, height_vec
, depth_vec
,
1021 row_stride_vec
, img_stride_vec
,
1022 data_ptr
, mipoffsets
, neighbors1
[0][1]);
1023 lp_build_sample_texel_soa(bld
,
1024 width_vec
, height_vec
, depth_vec
,
1026 row_stride_vec
, img_stride_vec
,
1027 data_ptr
, mipoffsets
, neighbors1
[1][0]);
1028 lp_build_sample_texel_soa(bld
,
1029 width_vec
, height_vec
, depth_vec
,
1031 row_stride_vec
, img_stride_vec
,
1032 data_ptr
, mipoffsets
, neighbors1
[1][1]);
1034 if (bld
->static_sampler_state
->compare_mode
== PIPE_TEX_COMPARE_NONE
) {
1035 /* Bilinear interpolate the four samples from the second Z slice */
1036 for (chan
= 0; chan
< 4; chan
++) {
1037 colors1
[chan
] = lp_build_lerp_2d(&bld
->texel_bld
,
1039 neighbors1
[0][0][chan
],
1040 neighbors1
[0][1][chan
],
1041 neighbors1
[1][0][chan
],
1042 neighbors1
[1][1][chan
],
1045 /* Linearly interpolate the two samples from the two 3D slices */
1046 for (chan
= 0; chan
< 4; chan
++) {
1047 colors_out
[chan
] = lp_build_lerp(&bld
->texel_bld
,
1049 colors0
[chan
], colors1
[chan
],
1054 LLVMValueRef cmpval00
, cmpval01
, cmpval10
, cmpval11
;
1055 cmpval00
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][0][0]);
1056 cmpval01
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[0][1][0]);
1057 cmpval10
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[1][0][0]);
1058 cmpval11
= lp_build_sample_comparefunc(bld
, coords
[4], neighbors
[1][1][0]);
1059 colors1
[0] = lp_build_masklerp2d(&bld
->texel_bld
, s_fpart
, t_fpart
,
1060 cmpval00
, cmpval01
, cmpval10
, cmpval11
);
1061 /* Linearly interpolate the two samples from the two 3D slices */
1062 colors_out
[0] = lp_build_lerp(&bld
->texel_bld
,
1064 colors0
[0], colors1
[0],
1066 colors_out
[1] = colors_out
[2] = colors_out
[3] = colors_out
[0];
1071 for (chan
= 0; chan
< 4; chan
++) {
1072 colors_out
[chan
] = colors0
[chan
];
1080 * Sample the texture/mipmap using given image filter and mip filter.
1081 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1082 * from (vectors or scalars).
1083 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1086 lp_build_sample_mipmap(struct lp_build_sample_context
*bld
,
1087 unsigned img_filter
,
1088 unsigned mip_filter
,
1089 LLVMValueRef
*coords
,
1090 const LLVMValueRef
*offsets
,
1091 LLVMValueRef ilevel0
,
1092 LLVMValueRef ilevel1
,
1093 LLVMValueRef lod_fpart
,
1094 LLVMValueRef
*colors_out
)
1096 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
1097 LLVMValueRef size0
= NULL
;
1098 LLVMValueRef size1
= NULL
;
1099 LLVMValueRef row_stride0_vec
= NULL
;
1100 LLVMValueRef row_stride1_vec
= NULL
;
1101 LLVMValueRef img_stride0_vec
= NULL
;
1102 LLVMValueRef img_stride1_vec
= NULL
;
1103 LLVMValueRef data_ptr0
= NULL
;
1104 LLVMValueRef data_ptr1
= NULL
;
1105 LLVMValueRef mipoff0
= NULL
;
1106 LLVMValueRef mipoff1
= NULL
;
1107 LLVMValueRef colors0
[4], colors1
[4];
1110 /* sample the first mipmap level */
1111 lp_build_mipmap_level_sizes(bld
, ilevel0
,
1113 &row_stride0_vec
, &img_stride0_vec
);
1114 if (bld
->num_mips
== 1) {
1115 data_ptr0
= lp_build_get_mipmap_level(bld
, ilevel0
);
1118 /* This path should work for num_lods 1 too but slightly less efficient */
1119 data_ptr0
= bld
->base_ptr
;
1120 mipoff0
= lp_build_get_mip_offsets(bld
, ilevel0
);
1122 if (img_filter
== PIPE_TEX_FILTER_NEAREST
) {
1123 lp_build_sample_image_nearest(bld
, size0
,
1124 row_stride0_vec
, img_stride0_vec
,
1125 data_ptr0
, mipoff0
, coords
, offsets
,
1129 assert(img_filter
== PIPE_TEX_FILTER_LINEAR
);
1130 lp_build_sample_image_linear(bld
, size0
, NULL
,
1131 row_stride0_vec
, img_stride0_vec
,
1132 data_ptr0
, mipoff0
, coords
, offsets
,
1136 /* Store the first level's colors in the output variables */
1137 for (chan
= 0; chan
< 4; chan
++) {
1138 LLVMBuildStore(builder
, colors0
[chan
], colors_out
[chan
]);
1141 if (mip_filter
== PIPE_TEX_MIPFILTER_LINEAR
) {
1142 struct lp_build_if_state if_ctx
;
1143 LLVMValueRef need_lerp
;
1145 /* need_lerp = lod_fpart > 0 */
1146 if (bld
->num_lods
== 1) {
1147 need_lerp
= LLVMBuildFCmp(builder
, LLVMRealUGT
,
1148 lod_fpart
, bld
->lodf_bld
.zero
,
1153 * We'll do mip filtering if any of the quads (or individual
1154 * pixel in case of per-pixel lod) need it.
1155 * It might be better to split the vectors here and only fetch/filter
1156 * quads which need it (if there's one lod per quad).
1158 need_lerp
= lp_build_compare(bld
->gallivm
, bld
->lodf_bld
.type
,
1160 lod_fpart
, bld
->lodf_bld
.zero
);
1161 need_lerp
= lp_build_any_true_range(&bld
->lodi_bld
, bld
->num_lods
, need_lerp
);
1164 lp_build_if(&if_ctx
, bld
->gallivm
, need_lerp
);
1167 * We unfortunately need to clamp lod_fpart here since we can get
1168 * negative values which would screw up filtering if not all
1169 * lod_fpart values have same sign.
1171 lod_fpart
= lp_build_max(&bld
->lodf_bld
, lod_fpart
,
1172 bld
->lodf_bld
.zero
);
1173 /* sample the second mipmap level */
1174 lp_build_mipmap_level_sizes(bld
, ilevel1
,
1176 &row_stride1_vec
, &img_stride1_vec
);
1177 if (bld
->num_mips
== 1) {
1178 data_ptr1
= lp_build_get_mipmap_level(bld
, ilevel1
);
1181 data_ptr1
= bld
->base_ptr
;
1182 mipoff1
= lp_build_get_mip_offsets(bld
, ilevel1
);
1184 if (img_filter
== PIPE_TEX_FILTER_NEAREST
) {
1185 lp_build_sample_image_nearest(bld
, size1
,
1186 row_stride1_vec
, img_stride1_vec
,
1187 data_ptr1
, mipoff1
, coords
, offsets
,
1191 lp_build_sample_image_linear(bld
, size1
, NULL
,
1192 row_stride1_vec
, img_stride1_vec
,
1193 data_ptr1
, mipoff1
, coords
, offsets
,
1197 /* interpolate samples from the two mipmap levels */
1199 if (bld
->num_lods
!= bld
->coord_type
.length
)
1200 lod_fpart
= lp_build_unpack_broadcast_aos_scalars(bld
->gallivm
,
1202 bld
->texel_bld
.type
,
1205 for (chan
= 0; chan
< 4; chan
++) {
1206 colors0
[chan
] = lp_build_lerp(&bld
->texel_bld
, lod_fpart
,
1207 colors0
[chan
], colors1
[chan
],
1209 LLVMBuildStore(builder
, colors0
[chan
], colors_out
[chan
]);
1212 lp_build_endif(&if_ctx
);
1218 * Sample the texture/mipmap using given mip filter, and using
1219 * both nearest and linear filtering at the same time depending
1221 * lod can be per quad but linear_mask is always per pixel.
1222 * ilevel0 and ilevel1 indicate the two mipmap levels to sample
1223 * from (vectors or scalars).
1224 * If we're using nearest miplevel sampling the '1' values will be null/unused.
1227 lp_build_sample_mipmap_both(struct lp_build_sample_context
*bld
,
1228 LLVMValueRef linear_mask
,
1229 unsigned mip_filter
,
1230 LLVMValueRef
*coords
,
1231 const LLVMValueRef
*offsets
,
1232 LLVMValueRef ilevel0
,
1233 LLVMValueRef ilevel1
,
1234 LLVMValueRef lod_fpart
,
1235 LLVMValueRef lod_positive
,
1236 LLVMValueRef
*colors_out
)
1238 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
1239 LLVMValueRef size0
= NULL
;
1240 LLVMValueRef size1
= NULL
;
1241 LLVMValueRef row_stride0_vec
= NULL
;
1242 LLVMValueRef row_stride1_vec
= NULL
;
1243 LLVMValueRef img_stride0_vec
= NULL
;
1244 LLVMValueRef img_stride1_vec
= NULL
;
1245 LLVMValueRef data_ptr0
= NULL
;
1246 LLVMValueRef data_ptr1
= NULL
;
1247 LLVMValueRef mipoff0
= NULL
;
1248 LLVMValueRef mipoff1
= NULL
;
1249 LLVMValueRef colors0
[4], colors1
[4];
1252 /* sample the first mipmap level */
1253 lp_build_mipmap_level_sizes(bld
, ilevel0
,
1255 &row_stride0_vec
, &img_stride0_vec
);
1256 if (bld
->num_mips
== 1) {
1257 data_ptr0
= lp_build_get_mipmap_level(bld
, ilevel0
);
1260 /* This path should work for num_lods 1 too but slightly less efficient */
1261 data_ptr0
= bld
->base_ptr
;
1262 mipoff0
= lp_build_get_mip_offsets(bld
, ilevel0
);
1265 lp_build_sample_image_linear(bld
, size0
, linear_mask
,
1266 row_stride0_vec
, img_stride0_vec
,
1267 data_ptr0
, mipoff0
, coords
, offsets
,
1270 /* Store the first level's colors in the output variables */
1271 for (chan
= 0; chan
< 4; chan
++) {
1272 LLVMBuildStore(builder
, colors0
[chan
], colors_out
[chan
]);
1275 if (mip_filter
== PIPE_TEX_MIPFILTER_LINEAR
) {
1276 struct lp_build_if_state if_ctx
;
1277 LLVMValueRef need_lerp
;
1280 * We'll do mip filtering if any of the quads (or individual
1281 * pixel in case of per-pixel lod) need it.
1282 * Note using lod_positive here not lod_fpart since it may be the same
1283 * condition as that used in the outer "if" in the caller hence llvm
1284 * should be able to merge the branches in this case.
1286 need_lerp
= lp_build_any_true_range(&bld
->lodi_bld
, bld
->num_lods
, lod_positive
);
1288 lp_build_if(&if_ctx
, bld
->gallivm
, need_lerp
);
1291 * We unfortunately need to clamp lod_fpart here since we can get
1292 * negative values which would screw up filtering if not all
1293 * lod_fpart values have same sign.
1295 lod_fpart
= lp_build_max(&bld
->lodf_bld
, lod_fpart
,
1296 bld
->lodf_bld
.zero
);
1297 /* sample the second mipmap level */
1298 lp_build_mipmap_level_sizes(bld
, ilevel1
,
1300 &row_stride1_vec
, &img_stride1_vec
);
1301 if (bld
->num_mips
== 1) {
1302 data_ptr1
= lp_build_get_mipmap_level(bld
, ilevel1
);
1305 data_ptr1
= bld
->base_ptr
;
1306 mipoff1
= lp_build_get_mip_offsets(bld
, ilevel1
);
1309 lp_build_sample_image_linear(bld
, size1
, linear_mask
,
1310 row_stride1_vec
, img_stride1_vec
,
1311 data_ptr1
, mipoff1
, coords
, offsets
,
1314 /* interpolate samples from the two mipmap levels */
1316 if (bld
->num_lods
!= bld
->coord_type
.length
)
1317 lod_fpart
= lp_build_unpack_broadcast_aos_scalars(bld
->gallivm
,
1319 bld
->texel_bld
.type
,
1322 for (chan
= 0; chan
< 4; chan
++) {
1323 colors0
[chan
] = lp_build_lerp(&bld
->texel_bld
, lod_fpart
,
1324 colors0
[chan
], colors1
[chan
],
1326 LLVMBuildStore(builder
, colors0
[chan
], colors_out
[chan
]);
1329 lp_build_endif(&if_ctx
);
1335 * Build (per-coord) layer value.
1336 * Either clamp layer to valid values or fill in optional out_of_bounds
1337 * value and just return value unclamped.
1340 lp_build_layer_coord(struct lp_build_sample_context
*bld
,
1341 unsigned texture_unit
,
1343 LLVMValueRef
*out_of_bounds
)
1345 LLVMValueRef num_layers
;
1346 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
1348 num_layers
= bld
->dynamic_state
->depth(bld
->dynamic_state
,
1349 bld
->gallivm
, texture_unit
);
1351 if (out_of_bounds
) {
1352 LLVMValueRef out1
, out
;
1353 num_layers
= lp_build_broadcast_scalar(int_coord_bld
, num_layers
);
1354 out
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, layer
, int_coord_bld
->zero
);
1355 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, layer
, num_layers
);
1356 *out_of_bounds
= lp_build_or(int_coord_bld
, out
, out1
);
1360 LLVMValueRef maxlayer
;
1361 maxlayer
= lp_build_sub(&bld
->int_bld
, num_layers
, bld
->int_bld
.one
);
1362 maxlayer
= lp_build_broadcast_scalar(int_coord_bld
, maxlayer
);
1363 return lp_build_clamp(int_coord_bld
, layer
, int_coord_bld
->zero
, maxlayer
);
1369 * Calculate cube face, lod, mip levels.
1372 lp_build_sample_common(struct lp_build_sample_context
*bld
,
1373 unsigned texture_index
,
1374 unsigned sampler_index
,
1375 LLVMValueRef
*coords
,
1376 const struct lp_derivatives
*derivs
, /* optional */
1377 LLVMValueRef lod_bias
, /* optional */
1378 LLVMValueRef explicit_lod
, /* optional */
1379 LLVMValueRef
*lod_pos_or_zero
,
1380 LLVMValueRef
*lod_fpart
,
1381 LLVMValueRef
*ilevel0
,
1382 LLVMValueRef
*ilevel1
)
1384 const unsigned mip_filter
= bld
->static_sampler_state
->min_mip_filter
;
1385 const unsigned min_filter
= bld
->static_sampler_state
->min_img_filter
;
1386 const unsigned mag_filter
= bld
->static_sampler_state
->mag_img_filter
;
1387 const unsigned target
= bld
->static_texture_state
->target
;
1388 LLVMValueRef first_level
, cube_rho
= NULL
;
1389 LLVMValueRef lod_ipart
= NULL
;
1390 struct lp_derivatives cube_derivs
;
1393 printf("%s mip %d min %d mag %d\n", __FUNCTION__,
1394 mip_filter, min_filter, mag_filter);
1398 * Choose cube face, recompute texcoords for the chosen face and
1399 * compute rho here too (as it requires transform of derivatives).
1401 if (target
== PIPE_TEXTURE_CUBE
) {
1402 boolean need_derivs
;
1403 need_derivs
= ((min_filter
!= mag_filter
||
1404 mip_filter
!= PIPE_TEX_MIPFILTER_NONE
) &&
1405 !bld
->static_sampler_state
->min_max_lod_equal
&&
1407 lp_build_cube_lookup(bld
, coords
, derivs
, &cube_rho
, &cube_derivs
, need_derivs
);
1408 derivs
= &cube_derivs
;
1410 else if (target
== PIPE_TEXTURE_1D_ARRAY
||
1411 target
== PIPE_TEXTURE_2D_ARRAY
) {
1412 coords
[2] = lp_build_iround(&bld
->coord_bld
, coords
[2]);
1413 coords
[2] = lp_build_layer_coord(bld
, texture_index
, coords
[2], NULL
);
1416 if (bld
->static_sampler_state
->compare_mode
!= PIPE_TEX_COMPARE_NONE
) {
1418 * Clamp p coords to [0,1] for fixed function depth texture format here.
1419 * Technically this is not entirely correct for unorm depth as the ref value
1420 * should be converted to the depth format (quantization!) and comparison
1421 * then done in texture format. This would actually help performance (since
1422 * only need to do it once and could save the per-sample conversion of texels
1423 * to floats instead), but it would need more messy code (would need to push
1424 * at least some bits down to actual fetch so conversion could be skipped,
1425 * and would have ugly interaction with border color, would need to convert
1426 * border color to that format too or do some other tricks to make it work).
1428 const struct util_format_description
*format_desc
= bld
->format_desc
;
1430 /* not entirely sure we couldn't end up with non-valid swizzle here */
1431 chan_type
= format_desc
->swizzle
[0] <= UTIL_FORMAT_SWIZZLE_W
?
1432 format_desc
->channel
[format_desc
->swizzle
[0]].type
:
1433 UTIL_FORMAT_TYPE_FLOAT
;
1434 if (chan_type
!= UTIL_FORMAT_TYPE_FLOAT
) {
1435 coords
[4] = lp_build_clamp(&bld
->coord_bld
, coords
[4],
1436 bld
->coord_bld
.zero
, bld
->coord_bld
.one
);
1441 * Compute the level of detail (float).
1443 if (min_filter
!= mag_filter
||
1444 mip_filter
!= PIPE_TEX_MIPFILTER_NONE
) {
1445 /* Need to compute lod either to choose mipmap levels or to
1446 * distinguish between minification/magnification with one mipmap level.
1448 lp_build_lod_selector(bld
, texture_index
, sampler_index
,
1449 coords
[0], coords
[1], coords
[2], cube_rho
,
1450 derivs
, lod_bias
, explicit_lod
,
1452 &lod_ipart
, lod_fpart
, lod_pos_or_zero
);
1454 lod_ipart
= bld
->lodi_bld
.zero
;
1455 *lod_pos_or_zero
= bld
->lodi_bld
.zero
;
1458 if (bld
->num_lods
!= bld
->num_mips
) {
1459 /* only makes sense if there's just a single mip level */
1460 assert(bld
->num_mips
== 1);
1461 lod_ipart
= lp_build_extract_range(bld
->gallivm
, lod_ipart
, 0, 1);
1465 * Compute integer mipmap level(s) to fetch texels from: ilevel0, ilevel1
1467 switch (mip_filter
) {
1469 assert(0 && "bad mip_filter value in lp_build_sample_soa()");
1471 case PIPE_TEX_MIPFILTER_NONE
:
1472 /* always use mip level 0 */
1473 if (HAVE_LLVM
== 0x0207 && target
== PIPE_TEXTURE_CUBE
) {
1474 /* XXX this is a work-around for an apparent bug in LLVM 2.7.
1475 * We should be able to set ilevel0 = const(0) but that causes
1476 * bad x86 code to be emitted.
1479 lp_build_nearest_mip_level(bld
, texture_index
, lod_ipart
, ilevel0
, NULL
);
1482 first_level
= bld
->dynamic_state
->first_level(bld
->dynamic_state
,
1483 bld
->gallivm
, texture_index
);
1484 first_level
= lp_build_broadcast_scalar(&bld
->leveli_bld
, first_level
);
1485 *ilevel0
= first_level
;
1488 case PIPE_TEX_MIPFILTER_NEAREST
:
1490 lp_build_nearest_mip_level(bld
, texture_index
, lod_ipart
, ilevel0
, NULL
);
1492 case PIPE_TEX_MIPFILTER_LINEAR
:
1495 lp_build_linear_mip_levels(bld
, texture_index
,
1496 lod_ipart
, lod_fpart
,
1503 lp_build_clamp_border_color(struct lp_build_sample_context
*bld
,
1504 unsigned sampler_unit
)
1506 struct gallivm_state
*gallivm
= bld
->gallivm
;
1507 LLVMBuilderRef builder
= gallivm
->builder
;
1508 LLVMValueRef border_color_ptr
=
1509 bld
->dynamic_state
->border_color(bld
->dynamic_state
,
1510 gallivm
, sampler_unit
);
1511 LLVMValueRef border_color
;
1512 const struct util_format_description
*format_desc
= bld
->format_desc
;
1513 struct lp_type vec4_type
= bld
->texel_type
;
1514 struct lp_build_context vec4_bld
;
1515 LLVMValueRef min_clamp
= NULL
;
1516 LLVMValueRef max_clamp
= NULL
;
1519 * For normalized format need to clamp border color (technically
1520 * probably should also quantize the data). Really sucks doing this
1521 * here but can't avoid at least for now since this is part of
1522 * sampler state and texture format is part of sampler_view state.
1523 * GL expects also expects clamping for uint/sint formats too so
1524 * do that as well (d3d10 can't end up here with uint/sint since it
1525 * only supports them with ld).
1527 vec4_type
.length
= 4;
1528 lp_build_context_init(&vec4_bld
, gallivm
, vec4_type
);
1531 * Vectorized clamping of border color. Loading is a bit of a hack since
1532 * we just cast the pointer to float array to pointer to vec4
1535 border_color_ptr
= lp_build_array_get_ptr(gallivm
, border_color_ptr
,
1536 lp_build_const_int32(gallivm
, 0));
1537 border_color_ptr
= LLVMBuildBitCast(builder
, border_color_ptr
,
1538 LLVMPointerType(vec4_bld
.vec_type
, 0), "");
1539 border_color
= LLVMBuildLoad(builder
, border_color_ptr
, "");
1540 /* we don't have aligned type in the dynamic state unfortunately */
1541 lp_set_load_alignment(border_color
, 4);
1544 * Instead of having some incredibly complex logic which will try to figure out
1545 * clamping necessary for each channel, simply use the first channel, and treat
1546 * mixed signed/unsigned normalized formats specially.
1547 * (Mixed non-normalized, which wouldn't work at all here, do not exist for a
1550 if (format_desc
->layout
== UTIL_FORMAT_LAYOUT_PLAIN
) {
1552 /* d/s needs special handling because both present means just sampling depth */
1553 if (util_format_is_depth_and_stencil(format_desc
->format
)) {
1554 chan
= format_desc
->swizzle
[0];
1557 chan
= util_format_get_first_non_void_channel(format_desc
->format
);
1559 if (chan
>= 0 && chan
<= UTIL_FORMAT_SWIZZLE_W
) {
1560 unsigned chan_type
= format_desc
->channel
[chan
].type
;
1561 unsigned chan_norm
= format_desc
->channel
[chan
].normalized
;
1562 unsigned chan_pure
= format_desc
->channel
[chan
].pure_integer
;
1563 if (chan_type
== UTIL_FORMAT_TYPE_SIGNED
) {
1565 min_clamp
= lp_build_const_vec(gallivm
, vec4_type
, -1.0F
);
1566 max_clamp
= vec4_bld
.one
;
1568 else if (chan_pure
) {
1570 * Border color was stored as int, hence need min/max clamp
1571 * only if chan has less than 32 bits..
1573 unsigned chan_size
= format_desc
->channel
[chan
].size
;
1574 if (chan_size
< 32) {
1575 min_clamp
= lp_build_const_int_vec(gallivm
, vec4_type
,
1576 0 - (1 << (chan_size
- 1)));
1577 max_clamp
= lp_build_const_int_vec(gallivm
, vec4_type
,
1578 (1 << (chan_size
- 1)) - 1);
1581 /* TODO: no idea about non-pure, non-normalized! */
1583 else if (chan_type
== UTIL_FORMAT_TYPE_UNSIGNED
) {
1585 min_clamp
= vec4_bld
.zero
;
1586 max_clamp
= vec4_bld
.one
;
1589 * Need a ugly hack here, because we don't have Z32_FLOAT_X8X24
1590 * we use Z32_FLOAT_S8X24 to imply sampling depth component
1591 * and ignoring stencil, which will blow up here if we try to
1592 * do a uint clamp in a float texel build...
1593 * And even if we had that format, mesa st also thinks using z24s8
1594 * means depth sampling ignoring stencil.
1596 else if (chan_pure
) {
1598 * Border color was stored as uint, hence never need min
1599 * clamp, and only need max clamp if chan has less than 32 bits.
1601 unsigned chan_size
= format_desc
->channel
[chan
].size
;
1602 if (chan_size
< 32) {
1603 max_clamp
= lp_build_const_int_vec(gallivm
, vec4_type
,
1604 (1 << chan_size
) - 1);
1606 /* TODO: no idea about non-pure, non-normalized! */
1609 else if (chan_type
== UTIL_FORMAT_TYPE_FIXED
) {
1610 /* TODO: I have no idea what clamp this would need if any! */
1613 /* mixed plain formats (or different pure size) */
1614 switch (format_desc
->format
) {
1615 case PIPE_FORMAT_B10G10R10A2_UINT
:
1616 case PIPE_FORMAT_R10G10B10A2_UINT
:
1618 unsigned max10
= (1 << 10) - 1;
1619 max_clamp
= lp_build_const_aos(gallivm
, vec4_type
, max10
, max10
,
1620 max10
, (1 << 2) - 1, NULL
);
1623 case PIPE_FORMAT_R10SG10SB10SA2U_NORM
:
1624 min_clamp
= lp_build_const_aos(gallivm
, vec4_type
, -1.0F
, -1.0F
,
1626 max_clamp
= vec4_bld
.one
;
1628 case PIPE_FORMAT_R8SG8SB8UX8U_NORM
:
1629 case PIPE_FORMAT_R5SG5SB6U_NORM
:
1630 min_clamp
= lp_build_const_aos(gallivm
, vec4_type
, -1.0F
, -1.0F
,
1632 max_clamp
= vec4_bld
.one
;
1639 /* cannot figure this out from format description */
1640 if (format_desc
->layout
== UTIL_FORMAT_LAYOUT_S3TC
) {
1641 /* s3tc formats are always unorm */
1642 min_clamp
= vec4_bld
.zero
;
1643 max_clamp
= vec4_bld
.one
;
1645 else if (format_desc
->layout
== UTIL_FORMAT_LAYOUT_RGTC
||
1646 format_desc
->layout
== UTIL_FORMAT_LAYOUT_ETC
) {
1647 switch (format_desc
->format
) {
1648 case PIPE_FORMAT_RGTC1_UNORM
:
1649 case PIPE_FORMAT_RGTC2_UNORM
:
1650 case PIPE_FORMAT_LATC1_UNORM
:
1651 case PIPE_FORMAT_LATC2_UNORM
:
1652 case PIPE_FORMAT_ETC1_RGB8
:
1653 min_clamp
= vec4_bld
.zero
;
1654 max_clamp
= vec4_bld
.one
;
1656 case PIPE_FORMAT_RGTC1_SNORM
:
1657 case PIPE_FORMAT_RGTC2_SNORM
:
1658 case PIPE_FORMAT_LATC1_SNORM
:
1659 case PIPE_FORMAT_LATC2_SNORM
:
1660 min_clamp
= lp_build_const_vec(gallivm
, vec4_type
, -1.0F
);
1661 max_clamp
= vec4_bld
.one
;
1669 * all others from subsampled/other group, though we don't care
1670 * about yuv (and should not have any from zs here)
1672 else if (format_desc
->colorspace
!= UTIL_FORMAT_COLORSPACE_YUV
){
1673 switch (format_desc
->format
) {
1674 case PIPE_FORMAT_R8G8_B8G8_UNORM
:
1675 case PIPE_FORMAT_G8R8_G8B8_UNORM
:
1676 case PIPE_FORMAT_G8R8_B8R8_UNORM
:
1677 case PIPE_FORMAT_R8G8_R8B8_UNORM
:
1678 case PIPE_FORMAT_R1_UNORM
: /* doesn't make sense but ah well */
1679 min_clamp
= vec4_bld
.zero
;
1680 max_clamp
= vec4_bld
.one
;
1682 case PIPE_FORMAT_R8G8Bx_SNORM
:
1683 min_clamp
= lp_build_const_vec(gallivm
, vec4_type
, -1.0F
);
1684 max_clamp
= vec4_bld
.one
;
1687 * Note smallfloat formats usually don't need clamping
1688 * (they still have infinite range) however this is not
1689 * true for r11g11b10 and r9g9b9e5, which can't represent
1690 * negative numbers (and additionally r9g9b9e5 can't represent
1691 * very large numbers). d3d10 seems happy without clamping in
1692 * this case, but gl spec is pretty clear: "for floating
1693 * point and integer formats, border values are clamped to
1694 * the representable range of the format" so do that here.
1696 case PIPE_FORMAT_R11G11B10_FLOAT
:
1697 min_clamp
= vec4_bld
.zero
;
1699 case PIPE_FORMAT_R9G9B9E5_FLOAT
:
1700 min_clamp
= vec4_bld
.zero
;
1701 max_clamp
= lp_build_const_vec(gallivm
, vec4_type
, MAX_RGB9E5
);
1711 border_color
= lp_build_max(&vec4_bld
, border_color
, min_clamp
);
1714 border_color
= lp_build_min(&vec4_bld
, border_color
, max_clamp
);
1717 bld
->border_color_clamped
= border_color
;
1722 * General texture sampling codegen.
1723 * This function handles texture sampling for all texture targets (1D,
1724 * 2D, 3D, cube) and all filtering modes.
1727 lp_build_sample_general(struct lp_build_sample_context
*bld
,
1728 unsigned sampler_unit
,
1729 LLVMValueRef
*coords
,
1730 const LLVMValueRef
*offsets
,
1731 LLVMValueRef lod_positive
,
1732 LLVMValueRef lod_fpart
,
1733 LLVMValueRef ilevel0
,
1734 LLVMValueRef ilevel1
,
1735 LLVMValueRef
*colors_out
)
1737 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
1738 const struct lp_static_sampler_state
*sampler_state
= bld
->static_sampler_state
;
1739 const unsigned mip_filter
= sampler_state
->min_mip_filter
;
1740 const unsigned min_filter
= sampler_state
->min_img_filter
;
1741 const unsigned mag_filter
= sampler_state
->mag_img_filter
;
1742 LLVMValueRef texels
[4];
1745 /* if we need border color, (potentially) clamp it now */
1746 if (lp_sampler_wrap_mode_uses_border_color(sampler_state
->wrap_s
,
1750 lp_sampler_wrap_mode_uses_border_color(sampler_state
->wrap_t
,
1754 lp_sampler_wrap_mode_uses_border_color(sampler_state
->wrap_r
,
1757 lp_build_clamp_border_color(bld
, sampler_unit
);
1762 * Get/interpolate texture colors.
1765 for (chan
= 0; chan
< 4; ++chan
) {
1766 texels
[chan
] = lp_build_alloca(bld
->gallivm
, bld
->texel_bld
.vec_type
, "");
1767 lp_build_name(texels
[chan
], "sampler%u_texel_%c_var", sampler_unit
, "xyzw"[chan
]);
1770 if (min_filter
== mag_filter
) {
1771 /* no need to distinguish between minification and magnification */
1772 lp_build_sample_mipmap(bld
, min_filter
, mip_filter
,
1774 ilevel0
, ilevel1
, lod_fpart
,
1779 * Could also get rid of the if-logic and always use mipmap_both, both
1780 * for the single lod and multi-lod case if nothing really uses this.
1782 if (bld
->num_lods
== 1) {
1783 /* Emit conditional to choose min image filter or mag image filter
1784 * depending on the lod being > 0 or <= 0, respectively.
1786 struct lp_build_if_state if_ctx
;
1788 lod_positive
= LLVMBuildTrunc(builder
, lod_positive
,
1789 LLVMInt1TypeInContext(bld
->gallivm
->context
), "");
1791 lp_build_if(&if_ctx
, bld
->gallivm
, lod_positive
);
1793 /* Use the minification filter */
1794 lp_build_sample_mipmap(bld
, min_filter
, mip_filter
,
1796 ilevel0
, ilevel1
, lod_fpart
,
1799 lp_build_else(&if_ctx
);
1801 /* Use the magnification filter */
1802 lp_build_sample_mipmap(bld
, mag_filter
, PIPE_TEX_MIPFILTER_NONE
,
1804 ilevel0
, NULL
, NULL
,
1807 lp_build_endif(&if_ctx
);
1810 LLVMValueRef need_linear
, linear_mask
;
1811 unsigned mip_filter_for_nearest
;
1812 struct lp_build_if_state if_ctx
;
1814 if (min_filter
== PIPE_TEX_FILTER_LINEAR
) {
1815 linear_mask
= lod_positive
;
1816 mip_filter_for_nearest
= PIPE_TEX_MIPFILTER_NONE
;
1819 linear_mask
= lp_build_not(&bld
->lodi_bld
, lod_positive
);
1820 mip_filter_for_nearest
= mip_filter
;
1822 need_linear
= lp_build_any_true_range(&bld
->lodi_bld
, bld
->num_lods
,
1825 if (bld
->num_lods
!= bld
->coord_type
.length
) {
1826 linear_mask
= lp_build_unpack_broadcast_aos_scalars(bld
->gallivm
,
1828 bld
->int_coord_type
,
1832 lp_build_if(&if_ctx
, bld
->gallivm
, need_linear
);
1835 * Do sampling with both filters simultaneously. This means using
1836 * a linear filter and doing some tricks (with weights) for the pixels
1837 * which need nearest filter.
1838 * Note that it's probably rare some pixels need nearest and some
1839 * linear filter but the fixups required for the nearest pixels
1840 * aren't all that complicated so just always run a combined path
1841 * if at least some pixels require linear.
1843 lp_build_sample_mipmap_both(bld
, linear_mask
, mip_filter
,
1846 lod_fpart
, lod_positive
,
1849 lp_build_else(&if_ctx
);
1852 * All pixels require just nearest filtering, which is way
1853 * cheaper than linear, hence do a separate path for that.
1855 lp_build_sample_mipmap(bld
, PIPE_TEX_FILTER_NEAREST
,
1856 mip_filter_for_nearest
,
1858 ilevel0
, ilevel1
, lod_fpart
,
1861 lp_build_endif(&if_ctx
);
1865 for (chan
= 0; chan
< 4; ++chan
) {
1866 colors_out
[chan
] = LLVMBuildLoad(builder
, texels
[chan
], "");
1867 lp_build_name(colors_out
[chan
], "sampler%u_texel_%c", sampler_unit
, "xyzw"[chan
]);
1873 * Texel fetch function.
1874 * In contrast to general sampling there is no filtering, no coord minification,
1875 * lod (if any) is always explicit uint, coords are uints (in terms of texel units)
1876 * directly to be applied to the selected mip level (after adding texel offsets).
1877 * This function handles texel fetch for all targets where texel fetch is supported
1878 * (no cube maps, but 1d, 2d, 3d are supported, arrays and buffers should be too).
1881 lp_build_fetch_texel(struct lp_build_sample_context
*bld
,
1882 unsigned texture_unit
,
1883 const LLVMValueRef
*coords
,
1884 LLVMValueRef explicit_lod
,
1885 const LLVMValueRef
*offsets
,
1886 LLVMValueRef
*colors_out
)
1888 struct lp_build_context
*perquadi_bld
= &bld
->lodi_bld
;
1889 struct lp_build_context
*int_coord_bld
= &bld
->int_coord_bld
;
1890 unsigned dims
= bld
->dims
, chan
;
1891 unsigned target
= bld
->static_texture_state
->target
;
1892 boolean out_of_bound_ret_zero
= TRUE
;
1893 LLVMValueRef size
, ilevel
;
1894 LLVMValueRef row_stride_vec
= NULL
, img_stride_vec
= NULL
;
1895 LLVMValueRef x
= coords
[0], y
= coords
[1], z
= coords
[2];
1896 LLVMValueRef width
, height
, depth
, i
, j
;
1897 LLVMValueRef offset
, out_of_bounds
, out1
;
1899 out_of_bounds
= int_coord_bld
->zero
;
1901 if (explicit_lod
&& bld
->static_texture_state
->target
!= PIPE_BUFFER
) {
1902 if (bld
->num_mips
!= int_coord_bld
->type
.length
) {
1903 ilevel
= lp_build_pack_aos_scalars(bld
->gallivm
, int_coord_bld
->type
,
1904 perquadi_bld
->type
, explicit_lod
, 0);
1907 ilevel
= explicit_lod
;
1909 lp_build_nearest_mip_level(bld
, texture_unit
, ilevel
, &ilevel
,
1910 out_of_bound_ret_zero
? &out_of_bounds
: NULL
);
1913 assert(bld
->num_mips
== 1);
1914 if (bld
->static_texture_state
->target
!= PIPE_BUFFER
) {
1915 ilevel
= bld
->dynamic_state
->first_level(bld
->dynamic_state
,
1916 bld
->gallivm
, texture_unit
);
1919 ilevel
= lp_build_const_int32(bld
->gallivm
, 0);
1922 lp_build_mipmap_level_sizes(bld
, ilevel
,
1924 &row_stride_vec
, &img_stride_vec
);
1925 lp_build_extract_image_sizes(bld
, &bld
->int_size_bld
, int_coord_bld
->type
,
1926 size
, &width
, &height
, &depth
);
1928 if (target
== PIPE_TEXTURE_1D_ARRAY
||
1929 target
== PIPE_TEXTURE_2D_ARRAY
) {
1930 if (out_of_bound_ret_zero
) {
1931 z
= lp_build_layer_coord(bld
, texture_unit
, z
, &out1
);
1932 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1935 z
= lp_build_layer_coord(bld
, texture_unit
, z
, NULL
);
1939 /* This is a lot like border sampling */
1942 * coords are really unsigned, offsets are signed, but I don't think
1943 * exceeding 31 bits is possible
1945 x
= lp_build_add(int_coord_bld
, x
, offsets
[0]);
1947 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, x
, int_coord_bld
->zero
);
1948 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1949 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, x
, width
);
1950 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1954 y
= lp_build_add(int_coord_bld
, y
, offsets
[1]);
1956 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, y
, int_coord_bld
->zero
);
1957 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1958 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, y
, height
);
1959 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1963 z
= lp_build_add(int_coord_bld
, z
, offsets
[2]);
1965 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_LESS
, z
, int_coord_bld
->zero
);
1966 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1967 out1
= lp_build_cmp(int_coord_bld
, PIPE_FUNC_GEQUAL
, z
, depth
);
1968 out_of_bounds
= lp_build_or(int_coord_bld
, out_of_bounds
, out1
);
1972 lp_build_sample_offset(int_coord_bld
,
1974 x
, y
, z
, row_stride_vec
, img_stride_vec
,
1977 if (bld
->static_texture_state
->target
!= PIPE_BUFFER
) {
1978 offset
= lp_build_add(int_coord_bld
, offset
,
1979 lp_build_get_mip_offsets(bld
, ilevel
));
1982 offset
= lp_build_andnot(int_coord_bld
, offset
, out_of_bounds
);
1984 lp_build_fetch_rgba_soa(bld
->gallivm
,
1987 bld
->base_ptr
, offset
,
1991 if (out_of_bound_ret_zero
) {
1993 * Only needed for ARB_robust_buffer_access_behavior and d3d10.
1994 * Could use min/max above instead of out-of-bounds comparisons
1995 * if we don't care about the result returned for out-of-bounds.
1997 for (chan
= 0; chan
< 4; chan
++) {
1998 colors_out
[chan
] = lp_build_select(&bld
->texel_bld
, out_of_bounds
,
1999 bld
->texel_bld
.zero
, colors_out
[chan
]);
2006 * Just set texels to white instead of actually sampling the texture.
2010 lp_build_sample_nop(struct gallivm_state
*gallivm
,
2011 struct lp_type type
,
2012 const LLVMValueRef
*coords
,
2013 LLVMValueRef texel_out
[4])
2015 LLVMValueRef one
= lp_build_one(gallivm
, type
);
2018 for (chan
= 0; chan
< 4; chan
++) {
2019 texel_out
[chan
] = one
;
2025 * Build texture sampling code.
2026 * 'texel' will return a vector of four LLVMValueRefs corresponding to
2028 * \param type vector float type to use for coords, etc.
2029 * \param is_fetch if this is a texel fetch instruction.
2030 * \param derivs partial derivatives of (s,t,r,q) with respect to x and y
2033 lp_build_sample_soa(struct gallivm_state
*gallivm
,
2034 const struct lp_static_texture_state
*static_texture_state
,
2035 const struct lp_static_sampler_state
*static_sampler_state
,
2036 struct lp_sampler_dynamic_state
*dynamic_state
,
2037 struct lp_type type
,
2039 unsigned texture_index
,
2040 unsigned sampler_index
,
2041 const LLVMValueRef
*coords
,
2042 const LLVMValueRef
*offsets
,
2043 const struct lp_derivatives
*derivs
, /* optional */
2044 LLVMValueRef lod_bias
, /* optional */
2045 LLVMValueRef explicit_lod
, /* optional */
2046 enum lp_sampler_lod_property lod_property
,
2047 LLVMValueRef texel_out
[4])
2049 unsigned target
= static_texture_state
->target
;
2050 unsigned dims
= texture_dims(target
);
2051 unsigned num_quads
= type
.length
/ 4;
2052 unsigned mip_filter
, min_img_filter
, mag_img_filter
, i
;
2053 struct lp_build_sample_context bld
;
2054 struct lp_static_sampler_state derived_sampler_state
= *static_sampler_state
;
2055 LLVMTypeRef i32t
= LLVMInt32TypeInContext(gallivm
->context
);
2056 LLVMBuilderRef builder
= gallivm
->builder
;
2057 LLVMValueRef tex_width
, newcoords
[5];
2060 enum pipe_format fmt
= static_texture_state
->format
;
2061 debug_printf("Sample from %s\n", util_format_name(fmt
));
2064 if (static_texture_state
->format
== PIPE_FORMAT_NONE
) {
2066 * If there's nothing bound, format is NONE, and we must return
2067 * all zero as mandated by d3d10 in this case.
2070 LLVMValueRef zero
= lp_build_const_vec(gallivm
, type
, 0.0F
);
2071 for (chan
= 0; chan
< 4; chan
++) {
2072 texel_out
[chan
] = zero
;
2077 assert(type
.floating
);
2079 /* Setup our build context */
2080 memset(&bld
, 0, sizeof bld
);
2081 bld
.gallivm
= gallivm
;
2082 bld
.static_sampler_state
= &derived_sampler_state
;
2083 bld
.static_texture_state
= static_texture_state
;
2084 bld
.dynamic_state
= dynamic_state
;
2085 bld
.format_desc
= util_format_description(static_texture_state
->format
);
2088 bld
.vector_width
= lp_type_width(type
);
2090 bld
.float_type
= lp_type_float(32);
2091 bld
.int_type
= lp_type_int(32);
2092 bld
.coord_type
= type
;
2093 bld
.int_coord_type
= lp_int_type(type
);
2094 bld
.float_size_in_type
= lp_type_float(32);
2095 bld
.float_size_in_type
.length
= dims
> 1 ? 4 : 1;
2096 bld
.int_size_in_type
= lp_int_type(bld
.float_size_in_type
);
2097 bld
.texel_type
= type
;
2099 /* always using the first channel hopefully should be safe,
2100 * if not things WILL break in other places anyway.
2102 if (bld
.format_desc
->colorspace
== UTIL_FORMAT_COLORSPACE_RGB
&&
2103 bld
.format_desc
->channel
[0].pure_integer
) {
2104 if (bld
.format_desc
->channel
[0].type
== UTIL_FORMAT_TYPE_SIGNED
) {
2105 bld
.texel_type
= lp_type_int_vec(type
.width
, type
.width
* type
.length
);
2107 else if (bld
.format_desc
->channel
[0].type
== UTIL_FORMAT_TYPE_UNSIGNED
) {
2108 bld
.texel_type
= lp_type_uint_vec(type
.width
, type
.width
* type
.length
);
2111 else if (util_format_has_stencil(bld
.format_desc
) &&
2112 !util_format_has_depth(bld
.format_desc
)) {
2113 /* for stencil only formats, sample stencil (uint) */
2114 bld
.texel_type
= lp_type_int_vec(type
.width
, type
.width
* type
.length
);
2117 if (!static_texture_state
->level_zero_only
) {
2118 derived_sampler_state
.min_mip_filter
= static_sampler_state
->min_mip_filter
;
2120 derived_sampler_state
.min_mip_filter
= PIPE_TEX_MIPFILTER_NONE
;
2122 mip_filter
= derived_sampler_state
.min_mip_filter
;
2125 debug_printf(" .min_mip_filter = %u\n", derived_sampler_state
.min_mip_filter
);
2128 if (static_texture_state
->target
== PIPE_TEXTURE_CUBE
||
2129 static_texture_state
->target
== PIPE_TEXTURE_CUBE_ARRAY
)
2132 * Seamless filtering ignores wrap modes.
2133 * Setting to CLAMP_TO_EDGE is correct for nearest filtering, for
2134 * bilinear it's not correct but way better than using for instance repeat.
2135 * Note we even set this for non-seamless. Technically GL allows any wrap
2136 * mode, which made sense when supporting true borders (can get seamless
2137 * effect with border and CLAMP_TO_BORDER), but gallium doesn't support
2138 * borders and d3d9 requires wrap modes to be ignored and it's a pain to fix
2139 * up the sampler state (as it makes it texture dependent).
2141 derived_sampler_state
.wrap_s
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
2142 derived_sampler_state
.wrap_t
= PIPE_TEX_WRAP_CLAMP_TO_EDGE
;
2145 min_img_filter
= derived_sampler_state
.min_img_filter
;
2146 mag_img_filter
= derived_sampler_state
.mag_img_filter
;
2150 * This is all a bit complicated different paths are chosen for performance
2152 * Essentially, there can be 1 lod per element, 1 lod per quad or 1 lod for
2153 * everything (the last two options are equivalent for 4-wide case).
2154 * If there's per-quad lod but we split to 4-wide so we can use AoS, per-quad
2155 * lod is calculated then the lod value extracted afterwards so making this
2156 * case basically the same as far as lod handling is concerned for the
2157 * further sample/filter code as the 1 lod for everything case.
2158 * Different lod handling mostly shows up when building mipmap sizes
2159 * (lp_build_mipmap_level_sizes() and friends) and also in filtering
2160 * (getting the fractional part of the lod to the right texels).
2164 * There are other situations where at least the multiple int lods could be
2165 * avoided like min and max lod being equal.
2167 bld
.num_mips
= bld
.num_lods
= 1;
2169 if ((gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) &&
2170 (gallivm_debug
& GALLIVM_DEBUG_NO_RHO_APPROX
) &&
2171 (static_texture_state
->target
== PIPE_TEXTURE_CUBE
) &&
2172 (!is_fetch
&& mip_filter
!= PIPE_TEX_MIPFILTER_NONE
)) {
2174 * special case for using per-pixel lod even for implicit lod,
2175 * which is generally never required (ok by APIs) except to please
2176 * some (somewhat broken imho) tests (because per-pixel face selection
2177 * can cause derivatives to be different for pixels outside the primitive
2178 * due to the major axis division even if pre-project derivatives are
2181 bld
.num_mips
= type
.length
;
2182 bld
.num_lods
= type
.length
;
2184 else if (lod_property
== LP_SAMPLER_LOD_PER_ELEMENT
||
2185 (explicit_lod
|| lod_bias
|| derivs
)) {
2186 if ((is_fetch
&& target
!= PIPE_BUFFER
) ||
2187 (!is_fetch
&& mip_filter
!= PIPE_TEX_MIPFILTER_NONE
)) {
2188 bld
.num_mips
= type
.length
;
2189 bld
.num_lods
= type
.length
;
2191 else if (!is_fetch
&& min_img_filter
!= mag_img_filter
) {
2193 bld
.num_lods
= type
.length
;
2196 /* TODO: for true scalar_lod should only use 1 lod value */
2197 else if ((is_fetch
&& explicit_lod
&& target
!= PIPE_BUFFER
) ||
2198 (!is_fetch
&& mip_filter
!= PIPE_TEX_MIPFILTER_NONE
)) {
2199 bld
.num_mips
= num_quads
;
2200 bld
.num_lods
= num_quads
;
2202 else if (!is_fetch
&& min_img_filter
!= mag_img_filter
) {
2204 bld
.num_lods
= num_quads
;
2208 bld
.lodf_type
= type
;
2209 /* we want native vector size to be able to use our intrinsics */
2210 if (bld
.num_lods
!= type
.length
) {
2211 /* TODO: this currently always has to be per-quad or per-element */
2212 bld
.lodf_type
.length
= type
.length
> 4 ? ((type
.length
+ 15) / 16) * 4 : 1;
2214 bld
.lodi_type
= lp_int_type(bld
.lodf_type
);
2215 bld
.levelf_type
= bld
.lodf_type
;
2216 if (bld
.num_mips
== 1) {
2217 bld
.levelf_type
.length
= 1;
2219 bld
.leveli_type
= lp_int_type(bld
.levelf_type
);
2220 bld
.float_size_type
= bld
.float_size_in_type
;
2221 /* Note: size vectors may not be native. They contain minified w/h/d/_ values,
2222 * with per-element lod that is w0/h0/d0/_/w1/h1/d1_/... so up to 8x4f32 */
2223 if (bld
.num_mips
> 1) {
2224 bld
.float_size_type
.length
= bld
.num_mips
== type
.length
?
2225 bld
.num_mips
* bld
.float_size_in_type
.length
:
2228 bld
.int_size_type
= lp_int_type(bld
.float_size_type
);
2230 lp_build_context_init(&bld
.float_bld
, gallivm
, bld
.float_type
);
2231 lp_build_context_init(&bld
.float_vec_bld
, gallivm
, type
);
2232 lp_build_context_init(&bld
.int_bld
, gallivm
, bld
.int_type
);
2233 lp_build_context_init(&bld
.coord_bld
, gallivm
, bld
.coord_type
);
2234 lp_build_context_init(&bld
.int_coord_bld
, gallivm
, bld
.int_coord_type
);
2235 lp_build_context_init(&bld
.int_size_in_bld
, gallivm
, bld
.int_size_in_type
);
2236 lp_build_context_init(&bld
.float_size_in_bld
, gallivm
, bld
.float_size_in_type
);
2237 lp_build_context_init(&bld
.int_size_bld
, gallivm
, bld
.int_size_type
);
2238 lp_build_context_init(&bld
.float_size_bld
, gallivm
, bld
.float_size_type
);
2239 lp_build_context_init(&bld
.texel_bld
, gallivm
, bld
.texel_type
);
2240 lp_build_context_init(&bld
.levelf_bld
, gallivm
, bld
.levelf_type
);
2241 lp_build_context_init(&bld
.leveli_bld
, gallivm
, bld
.leveli_type
);
2242 lp_build_context_init(&bld
.lodf_bld
, gallivm
, bld
.lodf_type
);
2243 lp_build_context_init(&bld
.lodi_bld
, gallivm
, bld
.lodi_type
);
2245 /* Get the dynamic state */
2246 tex_width
= dynamic_state
->width(dynamic_state
, gallivm
, texture_index
);
2247 bld
.row_stride_array
= dynamic_state
->row_stride(dynamic_state
, gallivm
, texture_index
);
2248 bld
.img_stride_array
= dynamic_state
->img_stride(dynamic_state
, gallivm
, texture_index
);
2249 bld
.base_ptr
= dynamic_state
->base_ptr(dynamic_state
, gallivm
, texture_index
);
2250 bld
.mip_offsets
= dynamic_state
->mip_offsets(dynamic_state
, gallivm
, texture_index
);
2251 /* Note that mip_offsets is an array[level] of offsets to texture images */
2253 /* width, height, depth as single int vector */
2255 bld
.int_size
= tex_width
;
2258 bld
.int_size
= LLVMBuildInsertElement(builder
, bld
.int_size_in_bld
.undef
,
2259 tex_width
, LLVMConstInt(i32t
, 0, 0), "");
2261 LLVMValueRef tex_height
=
2262 dynamic_state
->height(dynamic_state
, gallivm
, texture_index
);
2263 bld
.int_size
= LLVMBuildInsertElement(builder
, bld
.int_size
,
2264 tex_height
, LLVMConstInt(i32t
, 1, 0), "");
2266 LLVMValueRef tex_depth
=
2267 dynamic_state
->depth(dynamic_state
, gallivm
, texture_index
);
2268 bld
.int_size
= LLVMBuildInsertElement(builder
, bld
.int_size
,
2269 tex_depth
, LLVMConstInt(i32t
, 2, 0), "");
2274 for (i
= 0; i
< 5; i
++) {
2275 newcoords
[i
] = coords
[i
];
2279 /* For debug: no-op texture sampling */
2280 lp_build_sample_nop(gallivm
,
2286 else if (is_fetch
) {
2287 lp_build_fetch_texel(&bld
, texture_index
, newcoords
,
2288 explicit_lod
, offsets
,
2293 LLVMValueRef lod_fpart
= NULL
, lod_positive
= NULL
;
2294 LLVMValueRef ilevel0
= NULL
, ilevel1
= NULL
;
2295 boolean use_aos
= util_format_fits_8unorm(bld
.format_desc
) &&
2296 /* not sure this is strictly needed or simply impossible */
2297 derived_sampler_state
.compare_mode
== PIPE_TEX_COMPARE_NONE
&&
2298 lp_is_simple_wrap_mode(derived_sampler_state
.wrap_s
);
2300 use_aos
&= bld
.num_lods
<= num_quads
||
2301 derived_sampler_state
.min_img_filter
==
2302 derived_sampler_state
.mag_img_filter
;
2304 use_aos
&= lp_is_simple_wrap_mode(derived_sampler_state
.wrap_t
);
2306 use_aos
&= lp_is_simple_wrap_mode(derived_sampler_state
.wrap_r
);
2310 if ((gallivm_debug
& GALLIVM_DEBUG_PERF
) &&
2311 !use_aos
&& util_format_fits_8unorm(bld
.format_desc
)) {
2312 debug_printf("%s: using floating point linear filtering for %s\n",
2313 __FUNCTION__
, bld
.format_desc
->short_name
);
2314 debug_printf(" min_img %d mag_img %d mip %d wraps %d wrapt %d wrapr %d\n",
2315 derived_sampler_state
.min_img_filter
,
2316 derived_sampler_state
.mag_img_filter
,
2317 derived_sampler_state
.min_mip_filter
,
2318 derived_sampler_state
.wrap_s
,
2319 derived_sampler_state
.wrap_t
,
2320 derived_sampler_state
.wrap_r
);
2323 lp_build_sample_common(&bld
, texture_index
, sampler_index
,
2325 derivs
, lod_bias
, explicit_lod
,
2326 &lod_positive
, &lod_fpart
,
2327 &ilevel0
, &ilevel1
);
2330 * we only try 8-wide sampling with soa as it appears to
2331 * be a loss with aos with AVX (but it should work, except
2332 * for conformance if min_filter != mag_filter if num_lods > 1).
2333 * (It should be faster if we'd support avx2)
2335 if (num_quads
== 1 || !use_aos
) {
2337 /* do sampling/filtering with fixed pt arithmetic */
2338 lp_build_sample_aos(&bld
, sampler_index
,
2339 newcoords
[0], newcoords
[1],
2341 offsets
, lod_positive
, lod_fpart
,
2347 lp_build_sample_general(&bld
, sampler_index
,
2349 lod_positive
, lod_fpart
,
2356 struct lp_build_sample_context bld4
;
2357 struct lp_type type4
= type
;
2359 LLVMValueRef texelout4
[4];
2360 LLVMValueRef texelouttmp
[4][LP_MAX_VECTOR_LENGTH
/16];
2364 /* Setup our build context */
2365 memset(&bld4
, 0, sizeof bld4
);
2366 bld4
.gallivm
= bld
.gallivm
;
2367 bld4
.static_texture_state
= bld
.static_texture_state
;
2368 bld4
.static_sampler_state
= bld
.static_sampler_state
;
2369 bld4
.dynamic_state
= bld
.dynamic_state
;
2370 bld4
.format_desc
= bld
.format_desc
;
2371 bld4
.dims
= bld
.dims
;
2372 bld4
.row_stride_array
= bld
.row_stride_array
;
2373 bld4
.img_stride_array
= bld
.img_stride_array
;
2374 bld4
.base_ptr
= bld
.base_ptr
;
2375 bld4
.mip_offsets
= bld
.mip_offsets
;
2376 bld4
.int_size
= bld
.int_size
;
2378 bld4
.vector_width
= lp_type_width(type4
);
2380 bld4
.float_type
= lp_type_float(32);
2381 bld4
.int_type
= lp_type_int(32);
2382 bld4
.coord_type
= type4
;
2383 bld4
.int_coord_type
= lp_int_type(type4
);
2384 bld4
.float_size_in_type
= lp_type_float(32);
2385 bld4
.float_size_in_type
.length
= dims
> 1 ? 4 : 1;
2386 bld4
.int_size_in_type
= lp_int_type(bld4
.float_size_in_type
);
2387 bld4
.texel_type
= bld
.texel_type
;
2388 bld4
.texel_type
.length
= 4;
2390 bld4
.num_mips
= bld4
.num_lods
= 1;
2391 if ((gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) &&
2392 (gallivm_debug
& GALLIVM_DEBUG_NO_RHO_APPROX
) &&
2393 (static_texture_state
->target
== PIPE_TEXTURE_CUBE
) &&
2394 (!is_fetch
&& mip_filter
!= PIPE_TEX_MIPFILTER_NONE
)) {
2395 bld4
.num_mips
= type4
.length
;
2396 bld4
.num_lods
= type4
.length
;
2398 if (lod_property
== LP_SAMPLER_LOD_PER_ELEMENT
&&
2399 (explicit_lod
|| lod_bias
|| derivs
)) {
2400 if ((is_fetch
&& target
!= PIPE_BUFFER
) ||
2401 (!is_fetch
&& mip_filter
!= PIPE_TEX_MIPFILTER_NONE
)) {
2402 bld4
.num_mips
= type4
.length
;
2403 bld4
.num_lods
= type4
.length
;
2405 else if (!is_fetch
&& min_img_filter
!= mag_img_filter
) {
2407 bld4
.num_lods
= type4
.length
;
2411 /* we want native vector size to be able to use our intrinsics */
2412 bld4
.lodf_type
= type4
;
2413 if (bld4
.num_lods
!= type4
.length
) {
2414 bld4
.lodf_type
.length
= 1;
2416 bld4
.lodi_type
= lp_int_type(bld4
.lodf_type
);
2417 bld4
.levelf_type
= type4
;
2418 if (bld4
.num_mips
!= type4
.length
) {
2419 bld4
.levelf_type
.length
= 1;
2421 bld4
.leveli_type
= lp_int_type(bld4
.levelf_type
);
2422 bld4
.float_size_type
= bld4
.float_size_in_type
;
2423 if (bld4
.num_mips
> 1) {
2424 bld4
.float_size_type
.length
= bld4
.num_mips
== type4
.length
?
2425 bld4
.num_mips
* bld4
.float_size_in_type
.length
:
2428 bld4
.int_size_type
= lp_int_type(bld4
.float_size_type
);
2430 lp_build_context_init(&bld4
.float_bld
, gallivm
, bld4
.float_type
);
2431 lp_build_context_init(&bld4
.float_vec_bld
, gallivm
, type4
);
2432 lp_build_context_init(&bld4
.int_bld
, gallivm
, bld4
.int_type
);
2433 lp_build_context_init(&bld4
.coord_bld
, gallivm
, bld4
.coord_type
);
2434 lp_build_context_init(&bld4
.int_coord_bld
, gallivm
, bld4
.int_coord_type
);
2435 lp_build_context_init(&bld4
.int_size_in_bld
, gallivm
, bld4
.int_size_in_type
);
2436 lp_build_context_init(&bld4
.float_size_in_bld
, gallivm
, bld4
.float_size_in_type
);
2437 lp_build_context_init(&bld4
.int_size_bld
, gallivm
, bld4
.int_size_type
);
2438 lp_build_context_init(&bld4
.float_size_bld
, gallivm
, bld4
.float_size_type
);
2439 lp_build_context_init(&bld4
.texel_bld
, gallivm
, bld4
.texel_type
);
2440 lp_build_context_init(&bld4
.levelf_bld
, gallivm
, bld4
.levelf_type
);
2441 lp_build_context_init(&bld4
.leveli_bld
, gallivm
, bld4
.leveli_type
);
2442 lp_build_context_init(&bld4
.lodf_bld
, gallivm
, bld4
.lodf_type
);
2443 lp_build_context_init(&bld4
.lodi_bld
, gallivm
, bld4
.lodi_type
);
2445 for (i
= 0; i
< num_quads
; i
++) {
2446 LLVMValueRef s4
, t4
, r4
;
2447 LLVMValueRef lod_positive4
, lod_fpart4
= NULL
;
2448 LLVMValueRef ilevel04
, ilevel14
= NULL
;
2449 LLVMValueRef offsets4
[4] = { NULL
};
2450 unsigned num_lods
= bld4
.num_lods
;
2452 s4
= lp_build_extract_range(gallivm
, newcoords
[0], 4*i
, 4);
2453 t4
= lp_build_extract_range(gallivm
, newcoords
[1], 4*i
, 4);
2454 r4
= lp_build_extract_range(gallivm
, newcoords
[2], 4*i
, 4);
2457 offsets4
[0] = lp_build_extract_range(gallivm
, offsets
[0], 4*i
, 4);
2459 offsets4
[1] = lp_build_extract_range(gallivm
, offsets
[1], 4*i
, 4);
2461 offsets4
[2] = lp_build_extract_range(gallivm
, offsets
[2], 4*i
, 4);
2465 lod_positive4
= lp_build_extract_range(gallivm
, lod_positive
, num_lods
* i
, num_lods
);
2466 ilevel04
= bld
.num_mips
== 1 ? ilevel0
:
2467 lp_build_extract_range(gallivm
, ilevel0
, num_lods
* i
, num_lods
);
2468 if (mip_filter
== PIPE_TEX_MIPFILTER_LINEAR
) {
2469 ilevel14
= lp_build_extract_range(gallivm
, ilevel1
, num_lods
* i
, num_lods
);
2470 lod_fpart4
= lp_build_extract_range(gallivm
, lod_fpart
, num_lods
* i
, num_lods
);
2474 /* do sampling/filtering with fixed pt arithmetic */
2475 lp_build_sample_aos(&bld4
, sampler_index
,
2476 s4
, t4
, r4
, offsets4
,
2477 lod_positive4
, lod_fpart4
,
2483 /* this path is currently unreachable and hence might break easily... */
2484 LLVMValueRef newcoords4
[5];
2488 newcoords4
[3] = lp_build_extract_range(gallivm
, newcoords
[3], 4*i
, 4);
2489 newcoords4
[4] = lp_build_extract_range(gallivm
, newcoords
[4], 4*i
, 4);
2491 lp_build_sample_general(&bld4
, sampler_index
,
2492 newcoords4
, offsets4
,
2493 lod_positive4
, lod_fpart4
,
2497 for (j
= 0; j
< 4; j
++) {
2498 texelouttmp
[j
][i
] = texelout4
[j
];
2502 for (j
= 0; j
< 4; j
++) {
2503 texel_out
[j
] = lp_build_concat(gallivm
, texelouttmp
[j
], type4
, num_quads
);
2508 if (target
!= PIPE_BUFFER
) {
2509 apply_sampler_swizzle(&bld
, texel_out
);
2513 * texel type can be a (32bit) int/uint (for pure int formats only),
2514 * however we are expected to always return floats (storage is untyped).
2516 if (!bld
.texel_type
.floating
) {
2518 for (chan
= 0; chan
< 4; chan
++) {
2519 texel_out
[chan
] = LLVMBuildBitCast(builder
, texel_out
[chan
],
2520 lp_build_vec_type(gallivm
, type
), "");
2526 lp_build_size_query_soa(struct gallivm_state
*gallivm
,
2527 const struct lp_static_texture_state
*static_state
,
2528 struct lp_sampler_dynamic_state
*dynamic_state
,
2529 struct lp_type int_type
,
2530 unsigned texture_unit
,
2532 boolean is_sviewinfo
,
2533 enum lp_sampler_lod_property lod_property
,
2534 LLVMValueRef explicit_lod
,
2535 LLVMValueRef
*sizes_out
)
2537 LLVMValueRef lod
, level
, size
;
2538 LLVMValueRef first_level
= NULL
;
2541 unsigned num_lods
= 1;
2542 struct lp_build_context bld_int_vec4
;
2544 if (static_state
->format
== PIPE_FORMAT_NONE
) {
2546 * If there's nothing bound, format is NONE, and we must return
2547 * all zero as mandated by d3d10 in this case.
2550 LLVMValueRef zero
= lp_build_const_vec(gallivm
, int_type
, 0.0F
);
2551 for (chan
= 0; chan
< 4; chan
++) {
2552 sizes_out
[chan
] = zero
;
2558 * Do some sanity verification about bound texture and shader dcl target.
2559 * Not entirely sure what's possible but assume array/non-array
2560 * always compatible (probably not ok for OpenGL but d3d10 has no
2561 * distinction of arrays at the resource level).
2562 * Everything else looks bogus (though not entirely sure about rect/2d).
2563 * Currently disabled because it causes assertion failures if there's
2564 * nothing bound (or rather a dummy texture, not that this case would
2565 * return the right values).
2567 if (0 && static_state
->target
!= target
) {
2568 if (static_state
->target
== PIPE_TEXTURE_1D
)
2569 assert(target
== PIPE_TEXTURE_1D_ARRAY
);
2570 else if (static_state
->target
== PIPE_TEXTURE_1D_ARRAY
)
2571 assert(target
== PIPE_TEXTURE_1D
);
2572 else if (static_state
->target
== PIPE_TEXTURE_2D
)
2573 assert(target
== PIPE_TEXTURE_2D_ARRAY
);
2574 else if (static_state
->target
== PIPE_TEXTURE_2D_ARRAY
)
2575 assert(target
== PIPE_TEXTURE_2D
);
2576 else if (static_state
->target
== PIPE_TEXTURE_CUBE
)
2577 assert(target
== PIPE_TEXTURE_CUBE_ARRAY
);
2578 else if (static_state
->target
== PIPE_TEXTURE_CUBE_ARRAY
)
2579 assert(target
== PIPE_TEXTURE_CUBE
);
2584 dims
= texture_dims(target
);
2587 case PIPE_TEXTURE_1D_ARRAY
:
2588 case PIPE_TEXTURE_2D_ARRAY
:
2596 assert(!int_type
.floating
);
2598 lp_build_context_init(&bld_int_vec4
, gallivm
, lp_type_int_vec(32, 128));
2601 /* FIXME: this needs to honor per-element lod */
2602 lod
= LLVMBuildExtractElement(gallivm
->builder
, explicit_lod
, lp_build_const_int32(gallivm
, 0), "");
2603 first_level
= dynamic_state
->first_level(dynamic_state
, gallivm
, texture_unit
);
2604 level
= LLVMBuildAdd(gallivm
->builder
, lod
, first_level
, "level");
2605 lod
= lp_build_broadcast_scalar(&bld_int_vec4
, level
);
2607 lod
= bld_int_vec4
.zero
;
2610 size
= bld_int_vec4
.undef
;
2612 size
= LLVMBuildInsertElement(gallivm
->builder
, size
,
2613 dynamic_state
->width(dynamic_state
, gallivm
, texture_unit
),
2614 lp_build_const_int32(gallivm
, 0), "");
2617 size
= LLVMBuildInsertElement(gallivm
->builder
, size
,
2618 dynamic_state
->height(dynamic_state
, gallivm
, texture_unit
),
2619 lp_build_const_int32(gallivm
, 1), "");
2623 size
= LLVMBuildInsertElement(gallivm
->builder
, size
,
2624 dynamic_state
->depth(dynamic_state
, gallivm
, texture_unit
),
2625 lp_build_const_int32(gallivm
, 2), "");
2628 size
= lp_build_minify(&bld_int_vec4
, size
, lod
);
2631 size
= LLVMBuildInsertElement(gallivm
->builder
, size
,
2632 dynamic_state
->depth(dynamic_state
, gallivm
, texture_unit
),
2633 lp_build_const_int32(gallivm
, dims
), "");
2636 * d3d10 requires zero for x/y/z values (but not w, i.e. mip levels)
2637 * if level is out of bounds (note this can't cover unbound texture
2638 * here, which also requires returning zero).
2640 if (explicit_lod
&& is_sviewinfo
) {
2641 LLVMValueRef last_level
, out
, out1
;
2642 struct lp_build_context leveli_bld
;
2644 /* everything is scalar for now */
2645 lp_build_context_init(&leveli_bld
, gallivm
, lp_type_int_vec(32, 32));
2646 last_level
= dynamic_state
->last_level(dynamic_state
, gallivm
, texture_unit
);
2648 out
= lp_build_cmp(&leveli_bld
, PIPE_FUNC_LESS
, level
, first_level
);
2649 out1
= lp_build_cmp(&leveli_bld
, PIPE_FUNC_GREATER
, level
, last_level
);
2650 out
= lp_build_or(&leveli_bld
, out
, out1
);
2651 if (num_lods
== 1) {
2652 out
= lp_build_broadcast_scalar(&bld_int_vec4
, out
);
2658 size
= lp_build_andnot(&bld_int_vec4
, size
, out
);
2660 for (i
= 0; i
< dims
+ (has_array
? 1 : 0); i
++) {
2661 sizes_out
[i
] = lp_build_extract_broadcast(gallivm
, bld_int_vec4
.type
, int_type
,
2663 lp_build_const_int32(gallivm
, i
));
2666 for (; i
< 4; i
++) {
2667 sizes_out
[i
] = lp_build_const_vec(gallivm
, int_type
, 0.0);
2672 * if there's no explicit_lod (buffers, rects) queries requiring nr of
2673 * mips would be illegal.
2675 if (is_sviewinfo
&& explicit_lod
) {
2676 struct lp_build_context bld_int_scalar
;
2677 LLVMValueRef num_levels
;
2678 lp_build_context_init(&bld_int_scalar
, gallivm
, lp_type_int(32));
2680 if (static_state
->level_zero_only
) {
2681 num_levels
= bld_int_scalar
.one
;
2684 LLVMValueRef last_level
;
2686 last_level
= dynamic_state
->last_level(dynamic_state
, gallivm
, texture_unit
);
2687 num_levels
= lp_build_sub(&bld_int_scalar
, last_level
, first_level
);
2688 num_levels
= lp_build_add(&bld_int_scalar
, num_levels
, bld_int_scalar
.one
);
2690 sizes_out
[3] = lp_build_broadcast(gallivm
, lp_build_vec_type(gallivm
, int_type
),