/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Texture sampling -- common code.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_math.h"
#include "lp_bld_arit.h"
#include "lp_bld_const.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "lp_bld_flow.h"
#include "lp_bld_sample.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_type.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_quad.h"
#include "lp_bld_bitarit.h"


/*
 * Bri-linear factor. Should be greater than one.
 */
#define BRILINEAR_FACTOR 2


/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}


/**
 * Initialize lp_sampler_static_texture_state object with the gallium
 * texture/sampler_view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state(struct lp_static_texture_state *state,
                                const struct pipe_sampler_view *view)
{
   const struct pipe_resource *texture;

   memset(state, 0, sizeof *state);

   if (!view || !view->texture)
      return;

   texture = view->texture;

   state->format            = view->format;
   state->swizzle_r         = view->swizzle_r;
   state->swizzle_g         = view->swizzle_g;
   state->swizzle_b         = view->swizzle_b;
   state->swizzle_a         = view->swizzle_a;

   state->target            = texture->target;
   state->pot_width         = util_is_power_of_two(texture->width0);
   state->pot_height        = util_is_power_of_two(texture->height0);
   state->pot_depth         = util_is_power_of_two(texture->depth0);
   state->level_zero_only   = !view->u.tex.last_level;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}


/**
 * Initialize lp_sampler_static_sampler_state object with the gallium sampler
 * state (this contains the parts which are considered static).
 */
void
lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
                                const struct pipe_sampler_state *sampler)
{
   memset(state, 0, sizeof *state);

   if (!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally the state tracker or cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more that can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->wrap_s            = sampler->wrap_s;
   state->wrap_t            = sampler->wrap_t;
   state->wrap_r            = sampler->wrap_r;
   state->min_img_filter    = sampler->min_img_filter;
   state->mag_img_filter    = sampler->mag_img_filter;
   state->seamless_cube_map = sampler->seamless_cube_map;

   if (sampler->max_lod > 0.0f) {
      state->min_mip_filter = sampler->min_mip_filter;
   } else {
      state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
       state->min_img_filter != state->mag_img_filter) {
      if (sampler->lod_bias != 0.0f) {
         state->lod_bias_non_zero = 1;
      }

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         /*
          * XXX this won't do anything with the mesa state tracker which always
          * sets max_lod to not more than actually present mip maps...
          */
         if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode      = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func   = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;
}
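

/*
 * For illustration of why min_max_lod_equal matters: a sampler with
 * min_lod == max_lod == 3.0 (a common setup during automatic mipmap
 * generation) lets lp_build_lod_selector() below skip the whole
 * derivative/log2(rho) computation and simply broadcast min_lod as the lod.
 */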


/**
 * Generate code to compute coordinate gradient (rho).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho has bld->levelf format (per quad or per element).
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned texture_unit,
             LLVMValueRef s,
             LLVMValueRef t,
             LLVMValueRef r,
             LLVMValueRef cube_rho,
             const struct lp_derivatives *derivs)
{
   struct gallivm_state *gallivm = bld->gallivm;
   struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *rho_bld = &bld->lodf_bld;
   const unsigned dims = bld->dims;
   LLVMValueRef ddx_ddy[2];
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;
   unsigned length = coord_bld->type.length;
   unsigned num_quads = length / 4;
   boolean rho_per_quad = rho_bld->type.length != length;
   boolean no_rho_opt = (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1);
   unsigned i;
   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
   LLVMValueRef rho_xvec, rho_yvec;

   /* Note that all simplified calculations will only work for isotropic filtering */

   /*
    * rho calcs are always per quad except for explicit derivs (excluding
    * the messy cube maps for now) when requested.
    */

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   if (cube_rho) {
      LLVMValueRef cubesize;
      LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);

      /*
       * Cube map code did already everything except size mul and per-quad extraction.
       * Luckily cube maps are always quadratic!
       */
      if (rho_per_quad) {
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, cube_rho, 0);
      }
      else {
         rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
      }
      /* Could optimize this for single quad just skip the broadcast */
      cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                            rho_bld->type, float_size, index0);
      /* skipping sqrt hence returning rho squared */
      cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
      rho = lp_build_mul(rho_bld, cubesize, rho);
   }
   else if (derivs) {
      LLVMValueRef ddmax[3], ddx[3], ddy[3];
      for (i = 0; i < dims; i++) {
         LLVMValueRef floatdim;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);

         floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                               coord_bld->type, float_size, indexi);

         /*
          * note that for rho_per_quad case could reduce math (at some shuffle
          * cost), but for now use same code to per-pixel lod case.
          */
         if (no_rho_opt) {
            ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
            ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
         }
         else {
            LLVMValueRef tmpx, tmpy;
            tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
            tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
            ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
            ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
         }
      }
      if (no_rho_opt) {
         rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
         rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
         if (dims > 2) {
            rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
            rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
         }
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
         /* skipping sqrt hence returning rho squared */
      }
      else {
         rho = ddmax[0];
         if (dims > 1) {
            rho = lp_build_max(coord_bld, rho, ddmax[1]);
            if (dims > 2) {
               rho = lp_build_max(coord_bld, rho, ddmax[2]);
            }
         }
      }
      if (rho_per_quad) {
         /*
          * rho_vec contains per-pixel rho, convert to scalar per quad.
          */
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, rho, 0);
      }
   }
   else {
      /*
       * This looks all a bit complex, but it's not that bad
       * (the shuffle code makes it look worse than it is).
       * Still, might not be ideal for all cases.
       */
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle2[] = {
         2, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      if (dims < 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
      }
      else if (dims >= 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
         }
      }

      if (no_rho_opt) {
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];

         for (i = 0; i < num_quads; i++) {
            shuffles[i*4+0] = shuffles[i*4+1] = index0;
            shuffles[i*4+2] = shuffles[i*4+3] = index1;
         }
         floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
                                           LLVMConstVector(shuffles, length), "");
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);

         if (dims > 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                                  coord_bld->type, float_size, index2);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
            ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
            rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
         }

         rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (rho_per_quad) {
            rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                            rho_bld->type, rho, 0);
         }
         else {
            rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
         }
         /* skipping sqrt hence returning rho squared */
      }
      else {
         ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
         }

         if (dims < 2) {
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
         }
         else if (dims == 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            static const unsigned char swizzle13[] = {
               1, 3,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
         }
         else {
            LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];

            for (i = 0; i < num_quads; i++) {
               shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
               shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
               shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
               shuffles1[4*i + 3] = i32undef;
               shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
               shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
               shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
               shuffles2[4*i + 3] = i32undef;
            }
            rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles1, length), "");
            rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles2, length), "");
         }

         rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (bld->coord_type.length > 4) {
            /* expand size to each quad */
            if (dims > 1) {
               /* could use some broadcast_vector helper for this? */
               LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
               for (i = 0; i < num_quads; i++) {
                  src[i] = float_size;
               }
               float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
            }
            else {
               float_size = lp_build_broadcast_scalar(coord_bld, float_size);
            }
            rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
                  rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);

                  rho = lp_build_max(coord_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
                     rho = lp_build_max(coord_bld, rho, rho_r);
                  }
               }
            }
            if (rho_per_quad) {
               rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                               rho_bld->type, rho, 0);
            }
            else {
               rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
            }
         }
         else {
            if (dims <= 1) {
               rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
            }
            rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
                  rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

                  rho = lp_build_max(float_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
                     rho = lp_build_max(float_bld, rho, rho_r);
                  }
               }
            }
            if (!rho_per_quad) {
               rho = lp_build_broadcast_scalar(rho_bld, rho);
            }
         }
      }
   }

   return rho;
}
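

/*
 * For illustration, restating what the code above computes in the common 2D
 * case: without GALLIVM_DEBUG_NO_RHO_APPROX it is the usual isotropic
 * approximation
 *
 *    rho = max(max(|ds/dx|, |ds/dy|) * width,
 *              max(|dt/dx|, |dt/dy|) * height)
 *
 * whereas the no_rho_opt paths return the squared value
 *
 *    rho^2 = max((ds/dx * width)^2 + (dt/dx * height)^2,
 *                (ds/dy * width)^2 + (dt/dy * height)^2)
 *
 * i.e. the squared euclidean length of the longer screen-space gradient;
 * the missing sqrt is folded into the log2 later (lod = 0.5 * log2(rho^2)).
 */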


/**
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when texture is known to
 * have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
 */
static void
lp_build_brilinear_lod(struct lp_build_context *bld,
                       LLVMValueRef lod,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_fpart;
   double pre_offset = (factor - 0.5)/factor - 0.5;
   double post_offset = 1 - factor;

   if (0) {
      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
   }

   lod = lp_build_add(bld, lod,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));

   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * It's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_fpart = lod_fpart;

   if (0) {
      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
   }
}
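

/*
 * For illustration, with factor == BRILINEAR_FACTOR == 2 the constants above
 * are pre_offset = (2 - 0.5)/2 - 0.5 = 0.25 and post_offset = 1 - 2 = -1.
 * A lod of 0.4 becomes 0.65, ifloor/fract gives ipart = 0, fpart = 0.65, and
 * 0.65 * 2 - 1 = 0.3; a lod of 0.05 yields fpart = -0.4, which the mip
 * filtering branch treats as zero.  The transition between two levels is thus
 * squeezed into the middle portion of each lod interval, while values near
 * integer lods effectively round to the nearest level.
 */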


/**
 * Combined log2 and brilinear lod computation.
 *
 * It is in effect identical to calling lp_build_fast_log2() and
 * lp_build_brilinear_lod() above, but by combining we can compute the integer
 * and fractional part independently.
 */
static void
lp_build_brilinear_rho(struct lp_build_context *bld,
                       LLVMValueRef rho,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_ipart;
   LLVMValueRef lod_fpart;

   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
   const double post_offset = 1 - 2*factor;

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, rho));

   /*
    * The pre factor will make the intersections with the exact powers of two
    * happen precisely where we want them to be, which means that the integer
    * part will not need any post adjustments.
    */
   rho = lp_build_mul(bld, rho,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));

   /* ipart = ifloor(log2(rho)) */
   lod_ipart = lp_build_extract_exponent(bld, rho, 0);

   /* fpart = rho / 2**ipart */
   lod_fpart = lp_build_extract_mantissa(bld, rho);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_ipart = lod_ipart;
   *out_lod_fpart = lod_fpart;
}
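

/*
 * For illustration of the decomposition used above: a float x = m * 2^e with
 * m in [1, 2) gives lp_build_extract_exponent(bld, x, 0) == e and
 * lp_build_extract_mantissa(bld, x) == m, e.g. x = 6.0 -> e = 2, m = 1.5.
 * The fractional lod is then derived directly from m (m * factor + post_offset)
 * instead of from log2(x) - e, which is what makes the integer and fractional
 * parts independent.
 */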


/**
 * Fast implementation of iround(log2(sqrt(x))), based on
 * log2(x^n) == n*log2(x).
 *
 * Gives accurate results all the time.
 * (Could be trivially extended to handle other power-of-two roots.)
 */
static LLVMValueRef
lp_build_ilog2_sqrt(struct lp_build_context *bld,
                    LLVMValueRef x)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef ipart;
   struct lp_type i_type = lp_int_type(bld->type);
   LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, x));

   /* ipart = log2(x) + 0.5 = 0.5*(log2(x^2) + 1.0) */
   ipart = lp_build_extract_exponent(bld, x, 1);
   ipart = LLVMBuildAShr(builder, ipart, one, "");

   return ipart;
}


/**
 * Generate code to compute texture level of detail (lambda).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param cube_rho  rho calculated by cube coord mapping (optional)
 * \param out_lod_ipart  integer part of lod
 * \param out_lod_fpart  float part of lod (never larger than 1 but may be negative)
 * \param out_lod_positive  (mask) if lod is positive (i.e. texture is minified)
 *
 * The resulting lod can be scalar per quad or be per element.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      unsigned texture_unit,
                      unsigned sampler_unit,
                      LLVMValueRef s,
                      LLVMValueRef t,
                      LLVMValueRef r,
                      LLVMValueRef cube_rho,
                      const struct lp_derivatives *derivs,
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart,
                      LLVMValueRef *out_lod_positive)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *lodf_bld = &bld->lodf_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->lodi_bld.zero;
   *out_lod_positive = bld->lodi_bld.zero;
   *out_lod_fpart = lodf_bld->zero;

   /*
    * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification:
    * "Implementations may either unconditionally assume c = 0 for the minification
    * vs. magnification switch-over point, or may choose to make c depend on the
    * combination of minification and magnification modes as follows: if the
    * magnification filter is given by LINEAR and the minification filter is given
    * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
    * done to ensure that a minified texture does not appear "sharper" than a
    * magnified texture. Otherwise c = 0."
    * And 3.9.11 Texture Minification:
    * "If lod is less than or equal to the constant c (see section 3.9.12) the
    * texture is said to be magnified; if it is greater, the texture is minified."
    * So, using 0 as switchover point always, and using magnification for lod == 0.
    * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec),
    * old GL versions required 0.5 for the modes listed above.
    * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
    */

   if (bld->static_sampler_state->min_max_lod_equal) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         bld->dynamic_state->min_lod(bld->dynamic_state,
                                     bld->gallivm, sampler_unit);

      lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
   }
   else {
      if (explicit_lod) {
         if (bld->num_lods != bld->coord_type.length)
            lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                            lodf_bld->type, explicit_lod, 0);
         else
            lod = explicit_lod;
      }
      else {
         LLVMValueRef rho;
         boolean rho_squared = ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
                                (bld->dims > 1)) || cube_rho;

         rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias &&
             !bld->static_sampler_state->lod_bias_non_zero &&
             !bld->static_sampler_state->apply_max_lod &&
             !bld->static_sampler_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions but keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               /*
                * Don't actually need both values all the time, lod_ipart is
                * needed for nearest mipfilter, lod_positive if min != mag.
                */
               if (rho_squared) {
                  *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
               }
               else {
                  *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
               }
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR) &&
                !rho_squared) {
               /*
                * This can't work if rho is squared. Not sure if it could be
                * fixed while keeping it worthwile, could also do sqrt here
                * but brilinear and no_rho_opt seems like a combination not
                * making much sense anyway so just use ordinary path below.
                */
               lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
         }

         if (0) {
            lod = lp_build_log2(lodf_bld, rho);
         }
         else {
            lod = lp_build_fast_log2(lodf_bld, rho);
         }
         if (rho_squared) {
            /* log2(x) == 0.5*log2(x^2) */
            lod = lp_build_mul(lodf_bld, lod,
                               lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));
         }

         /* add shader lod bias */
         if (lod_bias) {
            if (bld->num_lods != bld->coord_type.length)
               lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                                    lodf_bld->type, lod_bias, 0);
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_sampler_state->lod_bias_non_zero) {
         LLVMValueRef sampler_lod_bias =
            bld->dynamic_state->lod_bias(bld->dynamic_state,
                                         bld->gallivm, sampler_unit);
         sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
                                                      sampler_lod_bias);
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      }

      /* clamp lod */
      if (bld->static_sampler_state->apply_max_lod) {
         LLVMValueRef max_lod =
            bld->dynamic_state->max_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);

         lod = lp_build_min(lodf_bld, lod, max_lod);
      }
      if (bld->static_sampler_state->apply_min_lod) {
         LLVMValueRef min_lod =
            bld->dynamic_state->min_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);

         lod = lp_build_max(lodf_bld, lod, min_lod);
      }
   }

   *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                    lod, lodf_bld->zero);

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
         lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(lodf_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");
}


/**
 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
 * to actual mip level.
 * Note: this is all scalar per quad code.
 * \param lod_ipart  int texture level of detail
 * \param level_out  returns integer
 * \param out_of_bounds  returns per coord out_of_bounds mask if provided
 */
void
lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *level_out,
                           LLVMValueRef *out_of_bounds)
{
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   LLVMValueRef first_level, last_level, level;

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   level = lp_build_add(leveli_bld, lod_ipart, first_level);

   if (out_of_bounds) {
      LLVMValueRef out, out1;
      out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(leveli_bld, out, out1);
      if (bld->num_mips == bld->coord_bld.type.length) {
         *out_of_bounds = out;
      }
      else if (bld->num_mips == 1) {
         *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
      }
      else {
         assert(bld->num_mips == bld->coord_bld.type.length / 4);
         *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                leveli_bld->type,
                                                                bld->int_coord_bld.type,
                                                                out);
      }
      *level_out = level;
   }
   else {
      /* clamp level to legal range of levels */
      *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
   }
}
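

/*
 * For illustration: with lod_ipart == 2 and a view whose first_level == 1 and
 * last_level == 5, the code above yields level == 3; lod_ipart == 7 would be
 * clamped to 5 (or, when out_of_bounds is requested, reported in the mask
 * instead of being clamped).
 */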


/**
 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
 * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod
 * part accordingly.
 * Later, we'll sample from those two mipmap levels and interpolate between them.
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_build_context *levelf_bld = &bld->levelf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   assert(bld->num_lods == bld->num_mips);

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /*
    * This code (vector select in particular) only works with llvm 3.1
    * (if there's more than one quad, with x86 backend). Might consider
    * converting to our lp_bld_logic helpers.
    */
#if HAVE_LLVM < 0x0301
   assert(leveli_bld->type.length == 1);
#endif

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
   lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
   lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
}


/**
 * Return pointer to a single mipmap level.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr, mip_offset;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
   mip_offset = LLVMBuildLoad(builder, mip_offset, "");
   data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
   return data_ptr;
}


/**
 * Return (per-pixel) offsets to mip levels.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
                         LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], offsets, offset1;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
      offset1 = LLVMBuildLoad(builder, offset1, "");
      offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      unsigned i;

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
      }
      offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
   }
   else {
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
      }
   }
   return offsets;
}


/**
 * Codegen equivalent for u_minify().
 * Return max(1, base_size >> level);
 */
LLVMValueRef
lp_build_minify(struct lp_build_context *bld,
                LLVMValueRef base_size,
                LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   assert(lp_check_value(bld->type, base_size));
   assert(lp_check_value(bld->type, level));

   if (level == bld->zero) {
      /* if we're using mipmap level zero, no minification is needed */
      return base_size;
   }
   else {
      LLVMValueRef size =
         LLVMBuildLShr(builder, base_size, level, "minify");
      assert(bld->type.sign);
      size = lp_build_max(bld, size, bld->one);
      return size;
   }
}
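

/*
 * For illustration: a base size vector of (64, 32, 1) shifted right by
 * level 6 becomes (1, 0, 0), and the max with one restores the required
 * minimum of (1, 1, 1) -- the same per-component result u_minify() produces
 * on the CPU.
 */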


/**
 * Dereference stride_array[mipmap_level] array to get a stride.
 * Return stride as a vector.
 */
static LLVMValueRef
lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
                              LLVMValueRef stride_array, LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], stride, stride1;
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
      stride1 = LLVMBuildLoad(builder, stride1, "");
      stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      LLVMValueRef stride1;
      unsigned i;

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
      }
      stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
   }
   else {
      LLVMValueRef stride1;
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->coord_bld.type.length; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
      }
   }
   return stride;
}


/**
 * When sampling a mipmap, we need to compute the width, height, depth
 * of the source levels from the level indexes.  This helper function
 * does that.
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   if (bld->num_mips == 1) {
      ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
      *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
   }
   else {
      LLVMValueRef int_size_vec;
      LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
      unsigned num_quads = bld->coord_bld.type.length / 4;
      unsigned i;

      if (bld->num_mips == num_quads) {
         /*
          * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
          * intel "forgot" the variable shift count instruction until avx2.
          * A harmless 8x32 shift gets translated into 32 instructions
          * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
          * unable to recognize if there are really just 2 different shift
          * count values. So do the shift 4-wide before expansion.
          */
         struct lp_build_context bld4;
         struct lp_type type4;

         type4 = bld->int_coord_bld.type;
         type4.length = 4;

         lp_build_context_init(&bld4, bld->gallivm, type4);

         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld4,
                                                     bld->int_size);
         }
         else {
            assert(bld->int_size_in_bld.type.length == 4);
            int_size_vec = bld->int_size;
         }

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef ileveli;
            LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);

            ileveli = lp_build_extract_broadcast(bld->gallivm,
                                                 bld->leveli_bld.type,
                                                 bld4.type,
                                                 ilevel,
                                                 indexi);
            tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
         }
         /*
          * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
          * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
          */
         *out_size = lp_build_concat(bld->gallivm,
                                     tmp,
                                     bld4.type,
                                     num_quads);
      }
      else {
         /* FIXME: this is terrible and results in _huge_ vector
          * (for the dims > 1 case).
          * Should refactor this (together with extract_image_sizes) and do
          * something more useful. Could for instance if we have width,height
          * with 4-wide vector pack all elements into a 8xi16 vector
          * (on which we can still do useful math) instead of using a 16xi32
          * vector.
          * FIXME: some callers can't handle this yet.
          * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
          * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
          */
         assert(bld->num_mips == bld->coord_bld.type.length);
         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
                                                     bld->int_size);
            /* vector shift with variable shift count alert... */
            *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
         }
         else {
            LLVMValueRef ilevel1;
            for (i = 0; i < bld->num_mips; i++) {
               LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
               ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
                                                    bld->int_size_in_bld.type, ilevel, indexi);
               tmp[i] = bld->int_size;
               tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
            }
            *out_size = lp_build_concat(bld->gallivm, tmp,
                                        bld->int_size_in_bld.type,
                                        bld->num_mips);
         }
      }
   }

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
   }
   if (dims == 3 ||
       bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
       bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
       bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
      *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->img_stride_array,
                                                      ilevel);
   }
}


/**
 * Extract and broadcast texture size.
 *
 * @param size_type  type of the texture size vector (either
 *                   bld->int_size_type or bld->float_size_type)
 * @param coord_type  type of the texture coordinate vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size  vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_build_context *size_bld,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   struct lp_type size_type = size_bld->type;

   if (bld->num_mips == 1) {
      *out_width = lp_build_extract_broadcast(bld->gallivm,
                                              size_type,
                                              coord_type,
                                              size,
                                              LLVMConstInt(i32t, 0, 0));
      if (dims >= 2) {
         *out_height = lp_build_extract_broadcast(bld->gallivm,
                                                  size_type,
                                                  coord_type,
                                                  size,
                                                  LLVMConstInt(i32t, 1, 0));
         if (dims == 3) {
            *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                    size_type,
                                                    coord_type,
                                                    size,
                                                    LLVMConstInt(i32t, 2, 0));
         }
      }
   }
   else {
      unsigned num_quads = bld->coord_bld.type.length / 4;

      if (dims == 1) {
         *out_width = size;
      }
      else if (bld->num_mips == num_quads) {
         *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
         if (dims >= 2) {
            *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
            if (dims == 3) {
               *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
            }
         }
      }
      else {
         assert(bld->num_mips == bld->coord_type.length);
         *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                coord_type, size, 0);
         if (dims >= 2) {
            *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                    coord_type, size, 1);
            if (dims == 3) {
               *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                      coord_type, size, 2);
            }
         }
      }
   }
}


/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the (float) texture size (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height;
   LLVMValueRef depth;

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = +0.5 / abs(coord); */
   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
   return ima;
}


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = -0.5 / abs(coord); */
   LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
   return ima;
}


/**
 * Helper used by lp_build_cube_lookup()
 * FIXME: the sign here can also be 0.
 * Arithmetically this could definitely make a difference. Either
 * fix the comment or use other (simpler) sign function, not sure
 * which one it should be.
 * \param sign  scalar +1 or -1
 * \param coord  float vector
 * \param ima  float vector
 */
static LLVMValueRef
lp_build_cube_coord(struct lp_build_context *coord_bld,
                    LLVMValueRef sign, int negate_coord,
                    LLVMValueRef coord, LLVMValueRef ima)
{
   /* return negate(coord) * ima * sign + 0.5; */
   LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef res;

   assert(negate_coord == +1 || negate_coord == -1);

   if (negate_coord == -1) {
      coord = lp_build_negate(coord_bld, coord);
   }

   res = lp_build_mul(coord_bld, coord, ima);
   if (sign) {
      sign = lp_build_broadcast_scalar(coord_bld, sign);
      res = lp_build_mul(coord_bld, res, sign);
   }
   res = lp_build_add(coord_bld, res, half);

   return res;
}


/** Helper used by lp_build_cube_lookup()
 * Return (major_coord >= 0) ? pos_face : neg_face;
 */
static LLVMValueRef
lp_build_cube_face(struct lp_build_sample_context *bld,
                   LLVMValueRef major_coord,
                   unsigned pos_face, unsigned neg_face)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
                                    major_coord,
                                    bld->float_bld.zero, "");
   LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
   LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
   LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
   return res;
}


/** Helper for doing 3-wise selection.
 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
 */
static LLVMValueRef
lp_build_select3(struct lp_build_context *sel_bld,
                 LLVMValueRef sel0,
                 LLVMValueRef sel1,
                 LLVMValueRef val0,
                 LLVMValueRef val1,
                 LLVMValueRef val2)
{
   LLVMValueRef tmp;
   tmp = lp_build_select(sel_bld, sel0, val0, val1);
   return lp_build_select(sel_bld, sel1, val2, tmp);
}
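

/*
 * For illustration of how this is used by the cube face selection below:
 * with sel0 = as_ge_at and sel1 = ar_ge_as_at,
 * lp_build_select3(bld, sel0, sel1, s, t, r) returns r where |r| is at least
 * max(|s|, |t|), otherwise s where |s| > |t|, else t -- i.e. a per-element
 * selection of the major-axis coordinate.
 */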
1501 * Generate code to do cube face selection and compute per-face texcoords.
1504 lp_build_cube_lookup(struct lp_build_sample_context
*bld
,
1505 LLVMValueRef
*coords
,
1506 const struct lp_derivatives
*derivs_in
, /* optional */
1508 struct lp_derivatives
*derivs_out
, /* optional */
1509 boolean need_derivs
)
1511 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
1512 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
1513 struct gallivm_state
*gallivm
= bld
->gallivm
;
1514 LLVMValueRef si
, ti
, ri
;
1516 if (1 || coord_bld
->type
.length
> 4) {
1518 * Do per-pixel face selection. We cannot however (as we used to do)
1519 * simply calculate the derivs afterwards (which is very bogus for
1520 * explicit derivs btw) because the values would be "random" when
1521 * not all pixels lie on the same face. So what we do here is just
1522 * calculate the derivatives after scaling the coords by the absolute
1523 * value of the inverse major axis, and essentially do rho calculation
1524 * steps as if it were a 3d texture. This is perfect if all pixels hit
1525 * the same face, but not so great at edges, I believe the max error
1526 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1527 * the 3d distance between 2 points on the cube instead of measuring up/down
1528 * the edge). Still this is possibly a win over just selecting the same face
1529 * for all pixels. Unfortunately, something like that doesn't work for
1530 * explicit derivatives.
1532 struct lp_build_context
*cint_bld
= &bld
->int_coord_bld
;
1533 struct lp_type intctype
= cint_bld
->type
;
1534 LLVMTypeRef coord_vec_type
= coord_bld
->vec_type
;
1535 LLVMTypeRef cint_vec_type
= cint_bld
->vec_type
;
1536 LLVMValueRef as
, at
, ar
, face
, face_s
, face_t
;
1537 LLVMValueRef as_ge_at
, maxasat
, ar_ge_as_at
;
1538 LLVMValueRef snewx
, tnewx
, snewy
, tnewy
, snewz
, tnewz
;
1539 LLVMValueRef tnegi
, rnegi
;
1540 LLVMValueRef ma
, mai
, signma
, signmabit
, imahalfpos
;
1541 LLVMValueRef posHalf
= lp_build_const_vec(gallivm
, coord_bld
->type
, 0.5);
1542 LLVMValueRef signmask
= lp_build_const_int_vec(gallivm
, intctype
,
1543 1 << (intctype
.width
- 1));
1544 LLVMValueRef signshift
= lp_build_const_int_vec(gallivm
, intctype
,
1546 LLVMValueRef facex
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_X
);
1547 LLVMValueRef facey
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_Y
);
1548 LLVMValueRef facez
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_Z
);
1549 LLVMValueRef s
= coords
[0];
1550 LLVMValueRef t
= coords
[1];
1551 LLVMValueRef r
= coords
[2];
1553 assert(PIPE_TEX_FACE_NEG_X
== PIPE_TEX_FACE_POS_X
+ 1);
1554 assert(PIPE_TEX_FACE_NEG_Y
== PIPE_TEX_FACE_POS_Y
+ 1);
1555 assert(PIPE_TEX_FACE_NEG_Z
== PIPE_TEX_FACE_POS_Z
+ 1);
1558 * get absolute value (for x/y/z face selection) and sign bit
1559 * (for mirroring minor coords and pos/neg face selection)
1560 * of the original coords.
1562 as
= lp_build_abs(&bld
->coord_bld
, s
);
1563 at
= lp_build_abs(&bld
->coord_bld
, t
);
1564 ar
= lp_build_abs(&bld
->coord_bld
, r
);
1567 * major face determination: select x if x > y else select y
1568 * select z if z >= max(x,y) else select previous result
1569 * if some axis are the same we chose z over y, y over x - the
1570 * dx10 spec seems to ask for it while OpenGL doesn't care (if we
1571 * wouldn't care could save a select or two if using different
1572 * compares and doing at_g_as_ar last since tnewx and tnewz are the
1575 as_ge_at
= lp_build_cmp(coord_bld
, PIPE_FUNC_GREATER
, as
, at
);
1576 maxasat
= lp_build_max(coord_bld
, as
, at
);
1577 ar_ge_as_at
= lp_build_cmp(coord_bld
, PIPE_FUNC_GEQUAL
, ar
, maxasat
);
1579 if (need_derivs
&& (derivs_in
||
1580 ((gallivm_debug
& GALLIVM_DEBUG_NO_QUAD_LOD
) &&
1581 (gallivm_debug
& GALLIVM_DEBUG_NO_RHO_APPROX
)))) {
1583 * XXX: This is really really complex.
1584 * It is a bit overkill to use this for implicit derivatives as well,
1585 * no way this is worth the cost in practice, but seems to be the
1586 * only way for getting accurate and per-pixel lod values.
1588 LLVMValueRef ima
, imahalf
, tmp
, ddx
[3], ddy
[3];
1589 LLVMValueRef madx
, mady
, madxdivma
, madydivma
;
1590 LLVMValueRef sdxi
, tdxi
, rdxi
, sdyi
, tdyi
, rdyi
;
1591 LLVMValueRef tdxnegi
, rdxnegi
, tdynegi
, rdynegi
;
1592 LLVMValueRef sdxnewx
, sdxnewy
, sdxnewz
, tdxnewx
, tdxnewy
, tdxnewz
;
1593 LLVMValueRef sdynewx
, sdynewy
, sdynewz
, tdynewx
, tdynewy
, tdynewz
;
1594 LLVMValueRef face_sdx
, face_tdx
, face_sdy
, face_tdy
;
1596 * s = 1/2 * ( sc / ma + 1)
1597 * t = 1/2 * ( tc / ma + 1)
1599 * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
1600 * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
1602 * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
1603 * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
1604 * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
1605 * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
1608 /* select ma, calculate ima */
1609 ma
= lp_build_select3(coord_bld
, as_ge_at
, ar_ge_as_at
, s
, t
, r
);
1610 mai
= LLVMBuildBitCast(builder
, ma
, cint_vec_type
, "");
1611 signmabit
= LLVMBuildAnd(builder
, mai
, signmask
, "");
1612 ima
= lp_build_div(coord_bld
, coord_bld
->one
, ma
);
1613 imahalf
= lp_build_mul(coord_bld
, posHalf
, ima
);
1614 imahalfpos
= lp_build_abs(coord_bld
, imahalf
);
1617 ddx
[0] = lp_build_ddx(coord_bld
, s
);
1618 ddx
[1] = lp_build_ddx(coord_bld
, t
);
1619 ddx
[2] = lp_build_ddx(coord_bld
, r
);
1620 ddy
[0] = lp_build_ddy(coord_bld
, s
);
1621 ddy
[1] = lp_build_ddy(coord_bld
, t
);
1622 ddy
[2] = lp_build_ddy(coord_bld
, r
);
1625 ddx
[0] = derivs_in
->ddx
[0];
1626 ddx
[1] = derivs_in
->ddx
[1];
1627 ddx
[2] = derivs_in
->ddx
[2];
1628 ddy
[0] = derivs_in
->ddy
[0];
1629 ddy
[1] = derivs_in
->ddy
[1];
1630 ddy
[2] = derivs_in
->ddy
[2];
1633 /* select major derivatives */
1634 madx
= lp_build_select3(coord_bld
, as_ge_at
, ar_ge_as_at
, ddx
[0], ddx
[1], ddx
[2]);
1635 mady
= lp_build_select3(coord_bld
, as_ge_at
, ar_ge_as_at
, ddy
[0], ddy
[1], ddy
[2]);
1637 si
= LLVMBuildBitCast(builder
, s
, cint_vec_type
, "");
1638 ti
= LLVMBuildBitCast(builder
, t
, cint_vec_type
, "");
1639 ri
= LLVMBuildBitCast(builder
, r
, cint_vec_type
, "");
1641 sdxi
= LLVMBuildBitCast(builder
, ddx
[0], cint_vec_type
, "");
1642 tdxi
= LLVMBuildBitCast(builder
, ddx
[1], cint_vec_type
, "");
1643 rdxi
= LLVMBuildBitCast(builder
, ddx
[2], cint_vec_type
, "");
1645 sdyi
= LLVMBuildBitCast(builder
, ddy
[0], cint_vec_type
, "");
1646 tdyi
= LLVMBuildBitCast(builder
, ddy
[1], cint_vec_type
, "");
1647 rdyi
= LLVMBuildBitCast(builder
, ddy
[2], cint_vec_type
, "");
1650 * compute all possible new s/t coords, which does the mirroring,
1651 * and do the same for derivs minor axes.
1652 * snewx = signma * -r;
1655 * tnewy = signma * r;
1656 * snewz = signma * s;
1659 tnegi
= LLVMBuildXor(builder
, ti
, signmask
, "");
1660 rnegi
= LLVMBuildXor(builder
, ri
, signmask
, "");
1661 tdxnegi
= LLVMBuildXor(builder
, tdxi
, signmask
, "");
1662 rdxnegi
= LLVMBuildXor(builder
, rdxi
, signmask
, "");
1663 tdynegi
= LLVMBuildXor(builder
, tdyi
, signmask
, "");
1664 rdynegi
= LLVMBuildXor(builder
, rdyi
, signmask
, "");
1666 snewx
= LLVMBuildXor(builder
, signmabit
, rnegi
, "");
1668 sdxnewx
= LLVMBuildXor(builder
, signmabit
, rdxnegi
, "");
1670 sdynewx
= LLVMBuildXor(builder
, signmabit
, rdynegi
, "");
1674 tnewy
= LLVMBuildXor(builder
, signmabit
, ri
, "");
1676 tdxnewy
= LLVMBuildXor(builder
, signmabit
, rdxi
, "");
1678 tdynewy
= LLVMBuildXor(builder
, signmabit
, rdyi
, "");
1680 snewz
= LLVMBuildXor(builder
, signmabit
, si
, "");
1682 sdxnewz
= LLVMBuildXor(builder
, signmabit
, sdxi
, "");
1684 sdynewz
= LLVMBuildXor(builder
, signmabit
, sdyi
, "");
1687 /* select the mirrored values */
1688 face
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, facex
, facey
, facez
);
1689 face_s
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, snewx
, snewy
, snewz
);
1690 face_t
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, tnewx
, tnewy
, tnewz
);
1691 face_sdx
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, sdxnewx
, sdxnewy
, sdxnewz
);
1692 face_tdx
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, tdxnewx
, tdxnewy
, tdxnewz
);
1693 face_sdy
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, sdynewx
, sdynewy
, sdynewz
);
1694 face_tdy
= lp_build_select3(cint_bld
, as_ge_at
, ar_ge_as_at
, tdynewx
, tdynewy
, tdynewz
);
1696 face_s
= LLVMBuildBitCast(builder
, face_s
, coord_vec_type
, "");
1697 face_t
= LLVMBuildBitCast(builder
, face_t
, coord_vec_type
, "");
1698 face_sdx
= LLVMBuildBitCast(builder
, face_sdx
, coord_vec_type
, "");
1699 face_tdx
= LLVMBuildBitCast(builder
, face_tdx
, coord_vec_type
, "");
1700 face_sdy
= LLVMBuildBitCast(builder
, face_sdy
, coord_vec_type
, "");
1701 face_tdy
= LLVMBuildBitCast(builder
, face_tdy
, coord_vec_type
, "");
1703 /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
1704 madxdivma
= lp_build_mul(coord_bld
, madx
, ima
);
1705 tmp
= lp_build_mul(coord_bld
, madxdivma
, face_s
);
1706 tmp
= lp_build_sub(coord_bld
, face_sdx
, tmp
);
1707 derivs_out
->ddx
[0] = lp_build_mul(coord_bld
, tmp
, imahalf
);
1709 /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
1710 tmp
= lp_build_mul(coord_bld
, madxdivma
, face_t
);
1711 tmp
= lp_build_sub(coord_bld
, face_tdx
, tmp
);
1712 derivs_out
->ddx
[1] = lp_build_mul(coord_bld
, tmp
, imahalf
);
1714 /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
1715 madydivma
= lp_build_mul(coord_bld
, mady
, ima
);
1716 tmp
= lp_build_mul(coord_bld
, madydivma
, face_s
);
1717 tmp
= lp_build_sub(coord_bld
, face_sdy
, tmp
);
1718 derivs_out
->ddy
[0] = lp_build_mul(coord_bld
, tmp
, imahalf
);
1720 /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
1721 tmp
= lp_build_mul(coord_bld
, madydivma
, face_t
);
1722 tmp
= lp_build_sub(coord_bld
, face_tdy
, tmp
);
1723 derivs_out
->ddy
[1] = lp_build_mul(coord_bld
, tmp
, imahalf
);
1725 signma
= LLVMBuildLShr(builder
, mai
, signshift
, "");
1726 coords
[2] = LLVMBuildOr(builder
, face
, signma
, "face");
1728 /* project coords */
1729 face_s
= lp_build_mul(coord_bld
, face_s
, imahalfpos
);
1730 face_t
= lp_build_mul(coord_bld
, face_t
, imahalfpos
);
1732 coords
[0] = lp_build_add(coord_bld
, face_s
, posHalf
);
1733 coords
[1] = lp_build_add(coord_bld
, face_t
, posHalf
);

         return;
      }

      else if (need_derivs) {
         LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
         static const unsigned char swizzle0[] = { /* no-op swizzle */
            0, LP_BLD_SWIZZLE_DONTCARE,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle1[] = {
            1, LP_BLD_SWIZZLE_DONTCARE,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle02[] = {
            0, 2,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };

         /*
          * scale the s/t/r coords pre-select/mirror so we can calculate
          * "reasonable" derivs.
          */
         ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
         imahalfpos = lp_build_cube_imapos(coord_bld, ma);
         s = lp_build_mul(coord_bld, s, imahalfpos);
         t = lp_build_mul(coord_bld, t, imahalfpos);
         r = lp_build_mul(coord_bld, r, imahalfpos);

         /*
          * This isn't quite the same as the "ordinary" (3d deriv) path since we
          * know the texture is square which simplifies things (we can omit the
          * size mul which happens very early completely here and do it at the
          * end instead).
          * Also always do calculations according to GALLIVM_DEBUG_NO_RHO_APPROX
          * since the error can get quite big otherwise at edges.
          * (With no_rho_approx max error is sqrt(2) at edges, same as it is
          * without no_rho_approx for 2d textures, otherwise it would be factor 2.)
          */
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);

         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);

         tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);

         rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
         rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);

         tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
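
         /*
          * Note: after the squaring and summing above, tmp[0]/tmp[1] appear to
          * hold dsdx^2 + dtdx^2 + drdx^2 resp. the corresponding dy sums per
          * quad, so *rho is presumably the squared rho here and the caller
          * compensates for that (e.g. by halving the log2) when computing the
          * lod.
          */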
      }
      else {
         ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      }

      mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
      signmabit = LLVMBuildAnd(builder, mai, signmask, "");

      si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
      ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
      ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");

      /*
       * compute all possible new s/t coords, which does the mirroring
       * snewx = signma * -r;
       * tnewx = -t;
       * snewy = s;
       * tnewy = signma * r;
       * snewz = signma * s;
       * tnewz = -t;
       */
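
      /*
       * Note: the sign flips are done with integer xors on the IEEE sign bit:
       * xor with signmask negates unconditionally (tnegi, rnegi), while xor
       * with signmabit (ma's sign bit) multiplies by sign(ma).
       */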
      tnegi = LLVMBuildXor(builder, ti, signmask, "");
      rnegi = LLVMBuildXor(builder, ri, signmask, "");

      snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
      tnewx = tnegi;

      snewy = si;
      tnewy = LLVMBuildXor(builder, signmabit, ri, "");

      snewz = LLVMBuildXor(builder, signmabit, si, "");
      tnewz = tnegi;

      /* select the mirrored values */
      face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
      face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
      face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);

      face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
      face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");

      /* add +1 for neg face */
      /* XXX with AVX probably want to use another select here -
       * as long as we ensure vblendvps gets used we can actually
       * skip the comparison and just use sign as a "mask" directly.
       */
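      /*
       * Note: signma is ma's sign bit shifted down to bit 0 (0 for a positive,
       * 1 for a negative major axis); since each NEG face enum is the
       * corresponding POS face + 1, or-ing it in picks the negative face.
       */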
      signma = LLVMBuildLShr(builder, mai, signshift, "");
      coords[2] = LLVMBuildOr(builder, face, signma, "face");

      /* project coords */
      if (!need_derivs) {
         imahalfpos = lp_build_cube_imapos(coord_bld, ma);
         face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
         face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
      }

      coords[0] = lp_build_add(coord_bld, face_s, posHalf);
      coords[1] = lp_build_add(coord_bld, face_t, posHalf);
   }

   else {
      struct lp_build_if_state if_ctx;
      LLVMValueRef face_s_var;
      LLVMValueRef face_t_var;
      LLVMValueRef face_var;
      LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
      LLVMValueRef shuffles[4];
      LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
      LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
      LLVMValueRef tmp[4], rxyz, arxyz;
      struct lp_build_context *float_bld = &bld->float_bld;
      LLVMValueRef s, t, r, face, face_s, face_t;

      assert(bld->coord_bld.type.length == 4);

      tmp[0] = s = coords[0];
      tmp[1] = t = coords[1];
      tmp[2] = r = coords[2];
      rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
      arxyz = lp_build_abs(&bld->coord_bld, rxyz);
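
      /*
       * Note: rxyz holds the per-quad sums of s, t, r (from the partial hadd
       * above), so this fallback path picks a single cube face for the whole
       * quad rather than per pixel.
       */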

      shuffles[0] = lp_build_const_int32(gallivm, 0);
      shuffles[1] = lp_build_const_int32(gallivm, 1);
      shuffles[2] = lp_build_const_int32(gallivm, 0);
      shuffles[3] = lp_build_const_int32(gallivm, 1);
      arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz,
                                      LLVMConstVector(shuffles, 4), "");
      shuffles[0] = lp_build_const_int32(gallivm, 1);
      shuffles[1] = lp_build_const_int32(gallivm, 0);
      shuffles[2] = lp_build_const_int32(gallivm, 2);
      shuffles[3] = lp_build_const_int32(gallivm, 2);
      aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz,
                                      LLVMConstVector(shuffles, 4), "");
      arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL,
                                      arxyxy, aryxzz);

      shuffles[0] = lp_build_const_int32(gallivm, 0);
      shuffles[1] = lp_build_const_int32(gallivm, 1);
      arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
                                            LLVMConstVector(shuffles, 2), "");
      shuffles[0] = lp_build_const_int32(gallivm, 2);
      shuffles[1] = lp_build_const_int32(gallivm, 3);
      arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
                                            LLVMConstVector(shuffles, 2), "");
      arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");

      arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
                                               lp_build_const_int32(gallivm, 0), "");
      arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
                                     lp_build_const_int32(gallivm, 0), "");
      ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
                                               lp_build_const_int32(gallivm, 1), "");
      ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
                                     lp_build_const_int32(gallivm, 0), "");
      face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
      face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
      face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");

      lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
      {
         LLVMValueRef sign, ima;
         si = LLVMBuildExtractElement(builder, rxyz,
                                      lp_build_const_int32(gallivm, 0), "");

         sign = lp_build_sgn(float_bld, si);
         ima = lp_build_cube_imaneg(coord_bld, s);
         face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
         face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
         face = lp_build_cube_face(bld, si,
                                   PIPE_TEX_FACE_POS_X,
                                   PIPE_TEX_FACE_NEG_X);
         LLVMBuildStore(builder, face_s, face_s_var);
         LLVMBuildStore(builder, face_t, face_t_var);
         LLVMBuildStore(builder, face, face_var);
      }
      lp_build_else(&if_ctx);
      {
         struct lp_build_if_state if_ctx2;

         lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
         {
            LLVMValueRef sign, ima;

            ti = LLVMBuildExtractElement(builder, rxyz,
                                         lp_build_const_int32(gallivm, 1), "");
            sign = lp_build_sgn(float_bld, ti);
            ima = lp_build_cube_imaneg(coord_bld, t);
            face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
            face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
            face = lp_build_cube_face(bld, ti,
                                      PIPE_TEX_FACE_POS_Y,
                                      PIPE_TEX_FACE_NEG_Y);
            LLVMBuildStore(builder, face_s, face_s_var);
            LLVMBuildStore(builder, face_t, face_t_var);
            LLVMBuildStore(builder, face, face_var);
         }
         lp_build_else(&if_ctx2);
         {
            LLVMValueRef sign, ima;
            ri = LLVMBuildExtractElement(builder, rxyz,
                                         lp_build_const_int32(gallivm, 2), "");
            sign = lp_build_sgn(float_bld, ri);
            ima = lp_build_cube_imaneg(coord_bld, r);
            face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
            face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
            face = lp_build_cube_face(bld, ri,
                                      PIPE_TEX_FACE_POS_Z,
                                      PIPE_TEX_FACE_NEG_Z);
            LLVMBuildStore(builder, face_s, face_s_var);
            LLVMBuildStore(builder, face_t, face_t_var);
            LLVMBuildStore(builder, face, face_var);
         }
         lp_build_endif(&if_ctx2);
      }

      lp_build_endif(&if_ctx);

      coords[0] = LLVMBuildLoad(builder, face_s_var, "face_s");
      coords[1] = LLVMBuildLoad(builder, face_t_var, "face_t");
      face = LLVMBuildLoad(builder, face_var, "face");
      coords[2] = lp_build_broadcast_scalar(&bld->int_coord_bld, face);
   }
}


/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
 *
 * @param coord   coordinate in pixels
 * @param stride  number of bytes between rows of successive pixel blocks
 * @param block_length  number of pixels in a pixels block along the coordinate
 *                      axis
 * @param out_offset    resulting relative offset of the pixel block in bytes
 * @param out_subcoord  resulting sub-block pixel coordinate
 */
void
lp_build_sample_partial_offset(struct lp_build_context *bld,
                               unsigned block_length,
                               LLVMValueRef coord,
                               LLVMValueRef stride,
                               LLVMValueRef *out_offset,
                               LLVMValueRef *out_subcoord)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef subcoord;

   if (block_length == 1) {
      subcoord = bld->zero;
   }
   else {
      /*
       * Pixel blocks have power of two dimensions. LLVM should convert the
       * rem/div to bit arithmetic.
       * TODO: Verify this.
       * It does indeed BUT it does transform it to scalar (and back) when doing so
       * (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
       * The generated code looks seriously unfunny and is quite expensive.
       */
#if 0
      LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
      subcoord = LLVMBuildURem(builder, coord, block_width, "");
      coord    = LLVMBuildUDiv(builder, coord, block_width, "");
#else
      unsigned logbase2 = util_logbase2(block_length);
      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
      coord = LLVMBuildLShr(builder, coord, block_shift, "");
#endif
   }

   offset = lp_build_mul(bld, coord, stride);

   assert(out_offset);
   assert(out_subcoord);

   *out_offset = offset;
   *out_subcoord = subcoord;
}


/**
 * Compute the offset of a pixel block.
 *
 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
 *
 * Returns the relative offset and i,j sub-block coordinates
 */
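/*
 * Note: for a plain format with 1x1 pixel blocks this boils down to roughly
 *    offset = x * bytes_per_pixel + y * y_stride + z * z_stride
 * with out_i/out_j both zero; for block-compressed formats x and y are first
 * divided by the block dimensions and out_i/out_j receive the remainders.
 */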
void
lp_build_sample_offset(struct lp_build_context *bld,
                       const struct util_format_description *format_desc,
                       LLVMValueRef x,
                       LLVMValueRef y,
                       LLVMValueRef z,
                       LLVMValueRef y_stride,
                       LLVMValueRef z_stride,
                       LLVMValueRef *out_offset,
                       LLVMValueRef *out_i,
                       LLVMValueRef *out_j)
{
   LLVMValueRef x_stride;
   LLVMValueRef offset;

   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
                                 format_desc->block.bits/8);

   lp_build_sample_partial_offset(bld,
                                  format_desc->block.width,
                                  x, x_stride,
                                  &offset, out_i);

   if (y && y_stride) {
      LLVMValueRef y_offset;
      lp_build_sample_partial_offset(bld,
                                     format_desc->block.height,
                                     y, y_stride,
                                     &y_offset, out_j);
      offset = lp_build_add(bld, offset, y_offset);
   }
   else {
      *out_j = bld->zero;
   }

   if (z && z_stride) {
      LLVMValueRef z_offset;
      LLVMValueRef k;
      lp_build_sample_partial_offset(bld,
                                     1, /* pixel blocks are always 2D */
                                     z, z_stride,
                                     &z_offset, &k);
      offset = lp_build_add(bld, offset, z_offset);
   }

   *out_offset = offset;