/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * Texture sampling -- common code.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "lp_bld_arit.h"
#include "lp_bld_const.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "lp_bld_flow.h"
#include "lp_bld_sample.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_type.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_quad.h"
#include "lp_bld_bitarit.h"

/**
 * Bri-linear factor. Should be greater than one.
 */
#define BRILINEAR_FACTOR 2

/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}
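
/*
 * Note: e.g. PIPE_TEX_WRAP_CLAMP with a LINEAR filter can blend the border
 * color in near the texture edges, whereas with NEAREST filtering only texels
 * inside the texture are ever fetched, which is why the CLAMP/MIRROR_CLAMP
 * cases above depend on the image filters.
 */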

/**
 * Initialize lp_sampler_static_texture_state object with the gallium
 * texture/sampler_view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state(struct lp_static_texture_state *state,
                                const struct pipe_sampler_view *view)
{
   const struct pipe_resource *texture;

   memset(state, 0, sizeof *state);

   if (!view || !view->texture)
      return;

   texture = view->texture;

   state->format = view->format;
   state->swizzle_r = view->swizzle_r;
   state->swizzle_g = view->swizzle_g;
   state->swizzle_b = view->swizzle_b;
   state->swizzle_a = view->swizzle_a;

   state->target = view->target;
   state->pot_width = util_is_power_of_two_or_zero(texture->width0);
   state->pot_height = util_is_power_of_two_or_zero(texture->height0);
   state->pot_depth = util_is_power_of_two_or_zero(texture->depth0);
   state->level_zero_only = !view->u.tex.last_level;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}

/**
 * Initialize lp_sampler_static_texture_state object with the gallium
 * image view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state_image(struct lp_static_texture_state *state,
                                      const struct pipe_image_view *view)
{
   const struct pipe_resource *resource;

   memset(state, 0, sizeof *state);

   if (!view || !view->resource)
      return;

   resource = view->resource;

   state->format = view->format;
   state->swizzle_r = PIPE_SWIZZLE_X;
   state->swizzle_g = PIPE_SWIZZLE_Y;
   state->swizzle_b = PIPE_SWIZZLE_Z;
   state->swizzle_a = PIPE_SWIZZLE_W;

   state->target = view->resource->target;
   state->pot_width = util_is_power_of_two_or_zero(resource->width0);
   state->pot_height = util_is_power_of_two_or_zero(resource->height0);
   state->pot_depth = util_is_power_of_two_or_zero(resource->depth0);
   state->level_zero_only = 0;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}

/**
 * Initialize lp_sampler_static_sampler_state object with the gallium sampler
 * state (this contains the parts which are considered static).
 */
void
lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
                                const struct pipe_sampler_state *sampler)
{
   memset(state, 0, sizeof *state);

   if (!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally gallium frontends or cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more that can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->wrap_s = sampler->wrap_s;
   state->wrap_t = sampler->wrap_t;
   state->wrap_r = sampler->wrap_r;
   state->min_img_filter = sampler->min_img_filter;
   state->mag_img_filter = sampler->mag_img_filter;
   state->min_mip_filter = sampler->min_mip_filter;
   state->seamless_cube_map = sampler->seamless_cube_map;

   if (sampler->max_lod > 0.0f) {
      state->max_lod_pos = 1;
   }

   if (sampler->lod_bias != 0.0f) {
      state->lod_bias_non_zero = 1;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
       state->min_img_filter != state->mag_img_filter) {

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         /*
          * XXX this won't do anything with the mesa state tracker which always
          * sets max_lod to not more than actually present mip maps...
          */
         if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;
}

/**
 * Generate code to compute coordinate gradient (rho).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho has bld->levelf format (per quad or per element).
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned texture_unit,
             LLVMValueRef s,
             LLVMValueRef t,
             LLVMValueRef r,
             LLVMValueRef cube_rho,
             const struct lp_derivatives *derivs)
{
   struct gallivm_state *gallivm = bld->gallivm;
   struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *rho_bld = &bld->lodf_bld;
   const unsigned dims = bld->dims;
   LLVMValueRef ddx_ddy[2] = {NULL};
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;
   unsigned length = coord_bld->type.length;
   unsigned num_quads = length / 4;
   boolean rho_per_quad = rho_bld->type.length != length;
   boolean no_rho_opt = bld->no_rho_approx && (dims > 1);
   unsigned i;
   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
   LLVMValueRef rho_xvec, rho_yvec;

   /* Note that all simplified calculations will only work for isotropic filtering */

   /*
    * rho calcs are always per quad except for explicit derivs (excluding
    * the messy cube maps for now) when requested.
    */

   first_level = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm,
                                                 bld->context_ptr, texture_unit, NULL);
   first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec, TRUE);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   if (cube_rho) {
      LLVMValueRef cubesize;
      LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);

      /*
       * Cube map code did already everything except size mul and per-quad extraction.
       * Luckily cube maps are always quadratic!
       */
      if (rho_per_quad) {
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, cube_rho, 0);
      }
      else {
         rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
      }
      /* Could optimize this for single quad just skip the broadcast */
      cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                            rho_bld->type, float_size, index0);
      /* skipping sqrt hence returning rho squared */
      cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
      rho = lp_build_mul(rho_bld, cubesize, rho);
   }
   else if (derivs) {
      LLVMValueRef ddmax[3] = { NULL }, ddx[3] = { NULL }, ddy[3] = { NULL };
      for (i = 0; i < dims; i++) {
         LLVMValueRef floatdim;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);

         floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                               coord_bld->type, float_size, indexi);

         /*
          * note that for rho_per_quad case could reduce math (at some shuffle
          * cost), but for now use same code to per-pixel lod case.
          */
         if (no_rho_opt) {
            ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
            ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
         }
         else {
            LLVMValueRef tmpx, tmpy;
            tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
            tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
            ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
            ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
         }
      }
      if (no_rho_opt) {
         rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
         rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
         if (dims > 2) {
            rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
            rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
         }
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
         /* skipping sqrt hence returning rho squared */
      }
      else {
         rho = ddmax[0];
         if (dims > 1) {
            rho = lp_build_max(coord_bld, rho, ddmax[1]);
            if (dims > 2) {
               rho = lp_build_max(coord_bld, rho, ddmax[2]);
            }
         }
      }
      if (rho_per_quad) {
         /*
          * rho_vec contains per-pixel rho, convert to scalar per quad.
          */
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, rho, 0);
      }
   }
   else {
      /*
       * This looks all a bit complex, but it's not that bad
       * (the shuffle code makes it look worse than it is).
       * Still, might not be ideal for all cases.
       */
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle2[] = {
         2, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      if (dims < 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
      }
      else if (dims >= 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
         }
      }

      if (no_rho_opt) {
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];

         for (i = 0; i < num_quads; i++) {
            shuffles[i*4+0] = shuffles[i*4+1] = index0;
            shuffles[i*4+2] = shuffles[i*4+3] = index1;
         }
         floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
                                           LLVMConstVector(shuffles, length), "");
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);

         if (dims > 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                                  coord_bld->type, float_size, index2);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
            ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
            rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
         }

         rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (rho_per_quad) {
            rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                            rho_bld->type, rho, 0);
         }
         else {
            rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
         }
         /* skipping sqrt hence returning rho squared */
      }
      else {
         ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
         }
         else {
            ddx_ddy[1] = NULL; /* silence compiler warning */
         }

         if (dims < 2) {
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
         }
         else if (dims == 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            static const unsigned char swizzle13[] = {
               1, 3,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
         }
         else {
            LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];

            for (i = 0; i < num_quads; i++) {
               shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
               shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
               shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
               shuffles1[4*i + 3] = i32undef;
               shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
               shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
               shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
               shuffles2[4*i + 3] = i32undef;
            }
            rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles1, length), "");
            rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles2, length), "");
         }

         rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (bld->coord_type.length > 4) {
            /* expand size to each quad */
            if (dims > 1) {
               /* could use some broadcast_vector helper for this? */
               LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
               for (i = 0; i < num_quads; i++) {
                  src[i] = float_size;
               }
               float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
            }
            else {
               float_size = lp_build_broadcast_scalar(coord_bld, float_size);
            }
            rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
                  rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);

                  rho = lp_build_max(coord_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
                     rho = lp_build_max(coord_bld, rho, rho_r);
                  }
               }
            }
            if (rho_per_quad) {
               rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                               rho_bld->type, rho, 0);
            }
            else {
               rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
            }
         }
         else {
            if (dims <= 1) {
               rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
            }
            rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
                  rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

                  rho = lp_build_max(float_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
                     rho = lp_build_max(float_bld, rho, rho_r);
                  }
               }
            }
            if (!rho_per_quad) {
               rho = lp_build_broadcast_scalar(rho_bld, rho);
            }
         }
      }
   }

   return rho;
}

/**
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5., etc,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when texture is known to
 * have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
 */
static void
lp_build_brilinear_lod(struct lp_build_context *bld,
                       LLVMValueRef lod,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_fpart;
   double pre_offset = (factor - 0.5)/factor - 0.5;
   double post_offset = 1 - factor;

   if (0) {
      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
   }

   lod = lp_build_add(bld, lod,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));

   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);

   lod_fpart = lp_build_mad(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor),
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * It's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_fpart = lod_fpart;

   if (0) {
      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
   }
}

/**
 * Combined log2 and brilinear lod computation.
 *
 * It's identical to calling lp_build_fast_log2() and
 * lp_build_brilinear_lod() above, but by combining we can compute the integer
 * and fractional part independently.
 */
static void
lp_build_brilinear_rho(struct lp_build_context *bld,
                       LLVMValueRef rho,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_ipart;
   LLVMValueRef lod_fpart;

   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
   const double post_offset = 1 - 2*factor;

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, rho));

   /*
    * The pre factor will make the intersections with the exact powers of two
    * happen precisely where we want them to be, which means that the integer
    * part will not need any post adjustments.
    */
   rho = lp_build_mul(bld, rho,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));

   /* ipart = ifloor(log2(rho)) */
   lod_ipart = lp_build_extract_exponent(bld, rho, 0);

   /* fpart = rho / 2**ipart */
   lod_fpart = lp_build_extract_mantissa(bld, rho);

   lod_fpart = lp_build_mad(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor),
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_ipart = lod_ipart;
   *out_lod_fpart = lod_fpart;
}

/**
 * Fast implementation of iround(log2(sqrt(x))), based on
 * log2(x^n) == n*log2(x).
 *
 * Gives accurate results all the time.
 * (Could be trivially extended to handle other power-of-two roots.)
 */
static LLVMValueRef
lp_build_ilog2_sqrt(struct lp_build_context *bld,
                    LLVMValueRef x)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef ipart;
   struct lp_type i_type = lp_int_type(bld->type);
   LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, x));

   /* ipart = log2(x) + 0.5 = 0.5*(log2(x^2) + 1.0) */
   ipart = lp_build_extract_exponent(bld, x, 1);
   ipart = LLVMBuildAShr(builder, ipart, one, "");

   return ipart;
}

/**
 * Generate code to compute texture level of detail (lambda).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param cube_rho  rho calculated by cube coord mapping (optional)
 * \param out_lod_ipart  integer part of lod
 * \param out_lod_fpart  float part of lod (never larger than 1 but may be negative)
 * \param out_lod_positive  (mask) if lod is positive (i.e. texture is minified)
 *
 * The resulting lod can be scalar per quad or be per element.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      boolean is_lodq,
                      unsigned texture_unit,
                      unsigned sampler_unit,
                      LLVMValueRef s,
                      LLVMValueRef t,
                      LLVMValueRef r,
                      LLVMValueRef cube_rho,
                      const struct lp_derivatives *derivs,
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart,
                      LLVMValueRef *out_lod_positive)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   struct lp_build_context *lodf_bld = &bld->lodf_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->lodi_bld.zero;
   *out_lod_positive = bld->lodi_bld.zero;
   *out_lod_fpart = lodf_bld->zero;

   /*
    * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification:
    * "Implementations may either unconditionally assume c = 0 for the minification
    * vs. magnification switch-over point, or may choose to make c depend on the
    * combination of minification and magnification modes as follows: if the
    * magnification filter is given by LINEAR and the minification filter is given
    * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
    * done to ensure that a minified texture does not appear "sharper" than a
    * magnified texture. Otherwise c = 0."
    * And 3.9.11 Texture Minification:
    * "If lod is less than or equal to the constant c (see section 3.9.12) the
    * texture is said to be magnified; if it is greater, the texture is minified."
    * So, using 0 as switchover point always, and using magnification for lod == 0.
    * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec),
    * old GL versions required 0.5 for the modes listed above.
    * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
    */

   if (bld->static_sampler_state->min_max_lod_equal && !is_lodq) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         dynamic_state->min_lod(dynamic_state, bld->gallivm,
                                bld->context_ptr, sampler_unit);

      lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
   }
   else {
      if (explicit_lod) {
         if (bld->num_lods != bld->coord_type.length)
            lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                            lodf_bld->type, explicit_lod, 0);
         else
            lod = explicit_lod;
      }
      else {
         LLVMValueRef rho;
         boolean rho_squared = (bld->no_rho_approx &&
                                (bld->dims > 1)) || cube_rho;

         rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias && !is_lodq &&
             !bld->static_sampler_state->lod_bias_non_zero &&
             !bld->static_sampler_state->apply_max_lod &&
             !bld->static_sampler_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions but keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               /*
                * Don't actually need both values all the time, lod_ipart is
                * needed for nearest mipfilter, lod_positive if min != mag.
                */
               if (rho_squared) {
                  *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
               }
               else {
                  *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
               }
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !bld->no_brilinear && !rho_squared) {
               /*
                * This can't work if rho is squared. Not sure if it could be
                * fixed while keeping it worthwile, could also do sqrt here
                * but brilinear and no_rho_opt seems like a combination not
                * making much sense anyway so just use ordinary path below.
                */
               lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
         }

         if (0) {
            lod = lp_build_log2(lodf_bld, rho);
         }
         else {
            /* get more accurate results if we just square rho always */
            if (!rho_squared)
               rho = lp_build_mul(lodf_bld, rho, rho);
            lod = lp_build_fast_log2(lodf_bld, rho);
         }

         /* log2(x) == 0.5*log2(x^2) */
         lod = lp_build_mul(lodf_bld, lod,
                            lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));

         /* add shader lod bias */
         if (lod_bias) {
            if (bld->num_lods != bld->coord_type.length)
               lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                                    lodf_bld->type, lod_bias, 0);
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_sampler_state->lod_bias_non_zero) {
         LLVMValueRef sampler_lod_bias =
            dynamic_state->lod_bias(dynamic_state, bld->gallivm,
                                    bld->context_ptr, sampler_unit);
         sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
                                                      sampler_lod_bias);
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      }

      if (is_lodq) {
         *out_lod = lod;
      }

      /* clamp lod */
      if (bld->static_sampler_state->apply_max_lod) {
         LLVMValueRef max_lod =
            dynamic_state->max_lod(dynamic_state, bld->gallivm,
                                   bld->context_ptr, sampler_unit);
         max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);

         lod = lp_build_min(lodf_bld, lod, max_lod);
      }
      if (bld->static_sampler_state->apply_min_lod) {
         LLVMValueRef min_lod =
            dynamic_state->min_lod(dynamic_state, bld->gallivm,
                                   bld->context_ptr, sampler_unit);
         min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);

         lod = lp_build_max(lodf_bld, lod, min_lod);
      }

      if (is_lodq) {
         *out_lod_fpart = lod;
         return;
      }
   }

   *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                    lod, lodf_bld->zero);

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!bld->no_brilinear) {
         lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(lodf_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");

   return;
}

/**
 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
 * to actual mip level.
 * Note: this is all scalar per quad code.
 * \param lod_ipart  int texture level of detail
 * \param level_out  returns integer
 * \param out_of_bounds  returns per coord out_of_bounds mask if provided
 */
void
lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *level_out,
                           LLVMValueRef *out_of_bounds)
{
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   LLVMValueRef first_level, last_level, level;

   first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
                                            bld->context_ptr, texture_unit, NULL);
   last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
                                          bld->context_ptr, texture_unit, NULL);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   level = lp_build_add(leveli_bld, lod_ipart, first_level);

   if (out_of_bounds) {
      LLVMValueRef out, out1;
      out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(leveli_bld, out, out1);
      if (bld->num_mips == bld->coord_bld.type.length) {
         *out_of_bounds = out;
      }
      else if (bld->num_mips == 1) {
         *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
      }
      else {
         assert(bld->num_mips == bld->coord_bld.type.length / 4);
         *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                leveli_bld->type,
                                                                bld->int_coord_bld.type,
                                                                out);
      }
      level = lp_build_andnot(&bld->int_coord_bld, level, *out_of_bounds);
      *level_out = level;
   }
   else {
      /* clamp level to legal range of levels */
      *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
   }
}
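
/*
 * E.g. with first_level = 1 and last_level = 4: lod_ipart = 2 selects level 3;
 * lod_ipart = 5 would give level 6, which is either clamped to 4 or, when the
 * out_of_bounds mask is requested, flagged there and zeroed in the level.
 */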

/**
 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
 * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod
 * part accordingly.
 * Later, we'll sample from those two mipmap levels and interpolate between them.
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_build_context *levelf_bld = &bld->levelf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   assert(bld->num_lods == bld->num_mips);

   first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
                                            bld->context_ptr, texture_unit, NULL);
   last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
                                          bld->context_ptr, texture_unit, NULL);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
   lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
   lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
}
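
/*
 * E.g. with first_level = 0, last_level = 2 and lod_ipart = 2: level0 = 2 and
 * level1 = 3; the clamp_max path then forces level1 back to 2 and zeroes
 * lod_fpart, so the linear filter degenerates to sampling the last level only.
 */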

/**
 * Return pointer to a single mipmap level.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr, mip_offset;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
   mip_offset = LLVMBuildLoad(builder, mip_offset, "");
   data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
   return data_ptr;
}

/**
 * Return (per-pixel) offsets to mip levels.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
                         LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], offsets, offset1;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
      offset1 = LLVMBuildLoad(builder, offset1, "");
      offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      unsigned i;

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
      }
      offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
   }
   else {
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
      }
   }
   return offsets;
}

/**
 * Codegen equivalent for u_minify().
 * @param lod_scalar  if lod is a (broadcasted) scalar
 * Return max(1, base_size >> level);
 */
LLVMValueRef
lp_build_minify(struct lp_build_context *bld,
                LLVMValueRef base_size,
                LLVMValueRef level,
                boolean lod_scalar)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   assert(lp_check_value(bld->type, base_size));
   assert(lp_check_value(bld->type, level));

   if (level == bld->zero) {
      /* if we're using mipmap level zero, no minification is needed */
      return base_size;
   }
   else {
      LLVMValueRef size;
      assert(bld->type.sign);
      if (lod_scalar ||
         (util_cpu_caps.has_avx2 || !util_cpu_caps.has_sse)) {
         size = LLVMBuildLShr(builder, base_size, level, "minify");
         size = lp_build_max(bld, size, bld->one);
      }
      else {
         /*
          * emulate shift with float mul, since intel "forgot" shifts with
          * per-element shift count until avx2, which results in terrible
          * scalar extraction (both count and value), scalar shift,
          * vector reinsertion. Should not be an issue on any non-x86 cpu
          * with a vector instruction set.
          * On cpus with AMD's XOP this should also be unnecessary but I'm
          * not sure if llvm would emit this with current flags.
          */
         LLVMValueRef const127, const23, lf;
         struct lp_type ftype;
         struct lp_build_context fbld;
         ftype = lp_type_float_vec(32, bld->type.length * bld->type.width);
         lp_build_context_init(&fbld, bld->gallivm, ftype);
         const127 = lp_build_const_int_vec(bld->gallivm, bld->type, 127);
         const23 = lp_build_const_int_vec(bld->gallivm, bld->type, 23);

         /* calculate 2^(-level) float */
         lf = lp_build_sub(bld, const127, level);
         lf = lp_build_shl(bld, lf, const23);
         lf = LLVMBuildBitCast(builder, lf, fbld.vec_type, "");

         /* finish shift operation by doing float mul */
         base_size = lp_build_int_to_float(&fbld, base_size);
         size = lp_build_mul(&fbld, base_size, lf);
         /*
          * do the max also with floats because
          * a) non-emulated int max requires sse41
          *    (this is actually a lie as we could cast to 16bit values
          *    as 16bit is sufficient and 16bit int max is sse2)
          * b) with avx we can do int max 4-wide but float max 8-wide
          */
         size = lp_build_max(&fbld, size, fbld.one);
         size = lp_build_itrunc(&fbld, size);
      }
      return size;
   }
}

/**
 * Dereference stride_array[mipmap_level] array to get a stride.
 * Return stride as a vector.
 */
static LLVMValueRef
lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
                              LLVMValueRef stride_array, LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], stride, stride1;
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
      stride1 = LLVMBuildLoad(builder, stride1, "");
      stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      LLVMValueRef stride1;
      unsigned i;

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
      }
      stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
   }
   else {
      LLVMValueRef stride1;
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->coord_bld.type.length; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
      }
   }
   return stride;
}

/**
 * When sampling a mipmap, we need to compute the width, height, depth
 * of the source levels from the level indexes.  This helper function
 * does that.
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   if (bld->num_mips == 1) {
      ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
      *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec, TRUE);
   }
   else {
      LLVMValueRef int_size_vec;
      LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
      unsigned num_quads = bld->coord_bld.type.length / 4;
      unsigned i;

      if (bld->num_mips == num_quads) {
         /*
          * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
          * intel "forgot" the variable shift count instruction until avx2.
          * A harmless 8x32 shift gets translated into 32 instructions
          * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
          * unable to recognize if there are really just 2 different shift
          * count values. So do the shift 4-wide before expansion.
          */
         struct lp_build_context bld4;
         struct lp_type type4;

         type4 = bld->int_coord_bld.type;
         type4.length = 4;

         lp_build_context_init(&bld4, bld->gallivm, type4);

         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld4,
                                                     bld->int_size);
         }
         else {
            assert(bld->int_size_in_bld.type.length == 4);
            int_size_vec = bld->int_size;
         }

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef ileveli;
            LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);

            ileveli = lp_build_extract_broadcast(bld->gallivm,
                                                 bld->leveli_bld.type,
                                                 bld4.type,
                                                 ilevel,
                                                 indexi);
            tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli, TRUE);
         }
         /*
          * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
          * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
          */
         *out_size = lp_build_concat(bld->gallivm,
                                     tmp,
                                     bld4.type,
                                     num_quads);
      }
      else {
         /* FIXME: this is terrible and results in _huge_ vector
          * (for the dims > 1 case).
          * Should refactor this (together with extract_image_sizes) and do
          * something more useful. Could for instance if we have width,height
          * with 4-wide vector pack all elements into a 8xi16 vector
          * (on which we can still do useful math) instead of using a 16xi32
          * vector.
          * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
          * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
          */
         assert(bld->num_mips == bld->coord_bld.type.length);
         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
                                                     bld->int_size);
            *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel, FALSE);
         }
         else {
            LLVMValueRef ilevel1;
            for (i = 0; i < bld->num_mips; i++) {
               LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
               ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
                                                    bld->int_size_in_bld.type, ilevel, indexi);
               tmp[i] = bld->int_size;
               tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1, TRUE);
            }
            *out_size = lp_build_concat(bld->gallivm, tmp,
                                        bld->int_size_in_bld.type,
                                        bld->num_mips);
         }
      }
   }

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
   }
   if (dims == 3 || has_layer_coord(bld->static_texture_state->target)) {
      *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->img_stride_array,
                                                      ilevel);
   }
}

/**
 * Extract and broadcast texture size.
 *
 * @param size_type  type of the texture size vector (either
 *                   bld->int_size_type or bld->float_size_type)
 * @param coord_type  type of the texture size vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size  vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_build_context *size_bld,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   struct lp_type size_type = size_bld->type;

   if (bld->num_mips == 1) {
      *out_width = lp_build_extract_broadcast(bld->gallivm,
                                              size_type,
                                              coord_type,
                                              size,
                                              LLVMConstInt(i32t, 0, 0));
      if (dims >= 2) {
         *out_height = lp_build_extract_broadcast(bld->gallivm,
                                                  size_type,
                                                  coord_type,
                                                  size,
                                                  LLVMConstInt(i32t, 1, 0));
         if (dims == 3) {
            *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                    size_type,
                                                    coord_type,
                                                    size,
                                                    LLVMConstInt(i32t, 2, 0));
         }
      }
   }
   else {
      unsigned num_quads = bld->coord_bld.type.length / 4;

      if (dims == 1) {
         *out_width = size;
      }
      else if (bld->num_mips == num_quads) {
         *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
         if (dims >= 2) {
            *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
            if (dims == 3) {
               *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
            }
         }
      }
      else {
         assert(bld->num_mips == bld->coord_type.length);
         *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                coord_type, size, 0);
         if (dims >= 2) {
            *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                    coord_type, size, 1);
            if (dims == 3) {
               *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                      coord_type, size, 2);
            }
         }
      }
   }
}

/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the (float) texture size (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height = NULL;
   LLVMValueRef depth = NULL;

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}

/**
 * Generate new coords and faces for cubemap texels falling off the face.
 *
 * @param face   face (center) of the pixel
 * @param x0     lower x coord
 * @param x1     higher x coord (must be x0 + 1)
 * @param y0     lower y coord
 * @param y1     higher y coord (must be y0 + 1)
 * @param max_coord     texture cube (level) size - 1
 * @param next_faces    new face values when falling off
 * @param next_xcoords  new x coord values when falling off
 * @param next_ycoords  new y coord values when falling off
 *
 * The arrays hold the new values when under/overflow of
 * lower x, higher x, lower y, higher y coord would occur (in this order).
 * next_xcoords/next_ycoords have two entries each (for both new lower and
 * higher coord).
 */
void
lp_build_cube_new_coords(struct lp_build_context *ivec_bld,
                         LLVMValueRef face,
                         LLVMValueRef x0,
                         LLVMValueRef x1,
                         LLVMValueRef y0,
                         LLVMValueRef y1,
                         LLVMValueRef max_coord,
                         LLVMValueRef next_faces[4],
                         LLVMValueRef next_xcoords[4][2],
                         LLVMValueRef next_ycoords[4][2])
{
   /*
    * Lookup tables aren't nice for simd code hence try some logic here.
    * (Note that while it would not be necessary to do per-sample (4) lookups
    * when using a LUT as it's impossible that texels fall off of positive
    * and negative edges simultaneously, it would however be necessary to
    * do 2 lookups for corner handling as in this case texels both fall off
    * of x and y axes.)
    */
   /*
    * Next faces (for face 012345):
    * Hence nfx+ (and nfy+) == nfx- (nfy-) xor 1
    * nfx-: face > 1 ? (face == 5 ? 0 : 1) : (4 + face & 1)
    * nfy+: face & ~4 > 1 ? face + 2 : 3;
    * This could also use pshufb instead, but would need (manually coded)
    * ssse3 intrinsic (llvm won't do non-constant shuffles).
    */
   struct gallivm_state *gallivm = ivec_bld->gallivm;
   LLVMValueRef sel, sel_f2345, sel_f23, sel_f2, tmpsel, tmp;
   LLVMValueRef faceand1, sel_fand1, maxmx0, maxmx1, maxmy0, maxmy1;
   LLVMValueRef c2 = lp_build_const_int_vec(gallivm, ivec_bld->type, 2);
   LLVMValueRef c3 = lp_build_const_int_vec(gallivm, ivec_bld->type, 3);
   LLVMValueRef c4 = lp_build_const_int_vec(gallivm, ivec_bld->type, 4);
   LLVMValueRef c5 = lp_build_const_int_vec(gallivm, ivec_bld->type, 5);

   sel = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c5);
   tmpsel = lp_build_select(ivec_bld, sel, ivec_bld->zero, ivec_bld->one);
   sel_f2345 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, face, ivec_bld->one);
   faceand1 = lp_build_and(ivec_bld, face, ivec_bld->one);
   tmp = lp_build_add(ivec_bld, faceand1, c4);
   next_faces[0] = lp_build_select(ivec_bld, sel_f2345, tmpsel, tmp);
   next_faces[1] = lp_build_xor(ivec_bld, next_faces[0], ivec_bld->one);

   tmp = lp_build_andnot(ivec_bld, face, c4);
   sel_f23 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, tmp, ivec_bld->one);
   tmp = lp_build_add(ivec_bld, face, c2);
   next_faces[3] = lp_build_select(ivec_bld, sel_f23, tmp, c3);
   next_faces[2] = lp_build_xor(ivec_bld, next_faces[3], ivec_bld->one);

   /*
    * new xcoords (for face 012345):
    * x < 0.0  : max   max   t     max-t max   max
    * x >= 1.0 : 0     0     max-t t     0     0
    * y < 0.0  : max   0     max-s s     s     max-s
    * y >= 1.0 : max   0     s     max-s s     max-s
    *
    * ncx[1] = face & ~4 > 1 ? (face == 2 ? max-t : t) : 0
    * ncx[0] = max - ncx[1]
    * ncx[3] = face > 1 ? (face & 1 ? max-s : s) : (face & 1) ? 0 : max
    * ncx[2] = face & ~4 > 1 ? max - ncx[3] : ncx[3]
    */
   sel_f2 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c2);
   maxmy0 = lp_build_sub(ivec_bld, max_coord, y0);
   tmp = lp_build_select(ivec_bld, sel_f2, maxmy0, y0);
   next_xcoords[1][0] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
   next_xcoords[0][0] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][0]);
   maxmy1 = lp_build_sub(ivec_bld, max_coord, y1);
   tmp = lp_build_select(ivec_bld, sel_f2, maxmy1, y1);
   next_xcoords[1][1] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
   next_xcoords[0][1] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][1]);

   sel_fand1 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, faceand1, ivec_bld->one);

   tmpsel = lp_build_select(ivec_bld, sel_fand1, ivec_bld->zero, max_coord);
   maxmx0 = lp_build_sub(ivec_bld, max_coord, x0);
   tmp = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
   next_xcoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][0]);
   next_xcoords[2][0] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][0]);
   maxmx1 = lp_build_sub(ivec_bld, max_coord, x1);
   tmp = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
   next_xcoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][1]);
   next_xcoords[2][1] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][1]);

   /*
    * new ycoords (for face 012345):
    * x < 0.0  : t     t     0     max   t     t
    * x >= 1.0 : t     t     0     max   t     t
    * y < 0.0  : max-s s     0     max   max   0
    * y >= 1.0 : s     max-s 0     max   0     max
    *
    * ncy[0] = face & ~4 > 1 ? (face == 2 ? 0 : max) : t
    * ncy[1] = ncy[0]
    * ncy[3] = face > 1 ? (face & 1 ? max : 0) : (face & 1) ? max-s : max
    * ncx[2] = face & ~4 > 1 ? max - ncx[3] : ncx[3]
    */
   tmp = lp_build_select(ivec_bld, sel_f2, ivec_bld->zero, max_coord);
   next_ycoords[0][0] = lp_build_select(ivec_bld, sel_f23, tmp, y0);
   next_ycoords[1][0] = next_ycoords[0][0];
   next_ycoords[0][1] = lp_build_select(ivec_bld, sel_f23, tmp, y1);
   next_ycoords[1][1] = next_ycoords[0][1];

   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
   next_ycoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][0]);
   next_ycoords[2][0] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][0], tmp);
   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
   next_ycoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][1]);
   next_ycoords[2][1] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][1], tmp);
}

/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = +0.5 / abs(coord); */
   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
   return ima;
}
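
/*
 * E.g. for a major-axis coordinate of -2.0 this gives ima = 0.25, so a minor
 * coordinate sc in [-2.0, 2.0] maps to sc * ima + 0.5 in [0, 1], which is the
 * face coordinate transform used by the cube lookup code below.
 */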

/** Helper for doing 3-wise selection.
 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
 */
static LLVMValueRef
lp_build_select3(struct lp_build_context *sel_bld,
                 LLVMValueRef sel0,
                 LLVMValueRef sel1,
                 LLVMValueRef val0,
                 LLVMValueRef val1,
                 LLVMValueRef val2)
{
   LLVMValueRef tmp;
   tmp = lp_build_select(sel_bld, sel0, val0, val1);
   return lp_build_select(sel_bld, sel1, val2, tmp);
}
1652 * Generate code to do cube face selection and compute per-face texcoords.
1655 lp_build_cube_lookup(struct lp_build_sample_context
*bld
,
1656 LLVMValueRef
*coords
,
1657 const struct lp_derivatives
*derivs_in
, /* optional */
1659 struct lp_derivatives
*derivs_out
, /* optional */
1660 boolean need_derivs
)
1662 struct lp_build_context
*coord_bld
= &bld
->coord_bld
;
1663 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
1664 struct gallivm_state
*gallivm
= bld
->gallivm
;
1665 LLVMValueRef si
, ti
, ri
;
1668 * Do per-pixel face selection. We cannot however (as we used to do)
1669 * simply calculate the derivs afterwards (which is very bogus for
1670 * explicit derivs btw) because the values would be "random" when
1671 * not all pixels lie on the same face. So what we do here is just
1672 * calculate the derivatives after scaling the coords by the absolute
1673 * value of the inverse major axis, and essentially do rho calculation
1674 * steps as if it were a 3d texture. This is perfect if all pixels hit
1675 * the same face, but not so great at edges, I believe the max error
1676 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1677 * the 3d distance between 2 points on the cube instead of measuring up/down
1678 * the edge). Still this is possibly a win over just selecting the same face
1679 * for all pixels. Unfortunately, something like that doesn't work for
1680 * explicit derivatives.
1682 struct lp_build_context
*cint_bld
= &bld
->int_coord_bld
;
1683 struct lp_type intctype
= cint_bld
->type
;
1684 LLVMTypeRef coord_vec_type
= coord_bld
->vec_type
;
1685 LLVMTypeRef cint_vec_type
= cint_bld
->vec_type
;
1686 LLVMValueRef as
, at
, ar
, face
, face_s
, face_t
;
1687 LLVMValueRef as_ge_at
, maxasat
, ar_ge_as_at
;
1688 LLVMValueRef snewx
, tnewx
, snewy
, tnewy
, snewz
, tnewz
;
1689 LLVMValueRef tnegi
, rnegi
;
1690 LLVMValueRef ma
, mai
, signma
, signmabit
, imahalfpos
;
1691 LLVMValueRef posHalf
= lp_build_const_vec(gallivm
, coord_bld
->type
, 0.5);
1692 LLVMValueRef signmask
= lp_build_const_int_vec(gallivm
, intctype
,
1693 1LL << (intctype
.width
- 1));
1694 LLVMValueRef signshift
= lp_build_const_int_vec(gallivm
, intctype
,
1696 LLVMValueRef facex
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_X
);
1697 LLVMValueRef facey
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_Y
);
1698 LLVMValueRef facez
= lp_build_const_int_vec(gallivm
, intctype
, PIPE_TEX_FACE_POS_Z
);
1699 LLVMValueRef s
= coords
[0];
1700 LLVMValueRef t
= coords
[1];
1701 LLVMValueRef r
= coords
[2];
1703 assert(PIPE_TEX_FACE_NEG_X
== PIPE_TEX_FACE_POS_X
+ 1);
1704 assert(PIPE_TEX_FACE_NEG_Y
== PIPE_TEX_FACE_POS_Y
+ 1);
1705 assert(PIPE_TEX_FACE_NEG_Z
== PIPE_TEX_FACE_POS_Z
+ 1);
1708 * get absolute value (for x/y/z face selection) and sign bit
1709 * (for mirroring minor coords and pos/neg face selection)
1710 * of the original coords.
1712 as
= lp_build_abs(&bld
->coord_bld
, s
);
1713 at
= lp_build_abs(&bld
->coord_bld
, t
);
1714 ar
= lp_build_abs(&bld
->coord_bld
, r
);
1717 * major face determination: select x if x > y else select y
1718 * select z if z >= max(x,y) else select previous result
1719 * if some axis are the same we chose z over y, y over x - the
1720 * dx10 spec seems to ask for it while OpenGL doesn't care (if we
1721 * wouldn't care could save a select or two if using different
1722 * compares and doing at_g_as_ar last since tnewx and tnewz are the
1725 as_ge_at
= lp_build_cmp(coord_bld
, PIPE_FUNC_GREATER
, as
, at
);
1726 maxasat
= lp_build_max(coord_bld
, as
, at
);
1727 ar_ge_as_at
= lp_build_cmp(coord_bld
, PIPE_FUNC_GEQUAL
, ar
, maxasat
);
1729 if (need_derivs
&& (derivs_in
|| (bld
->no_quad_lod
&& bld
->no_rho_approx
))) {
1731 * XXX: This is really really complex.
1732 * It is a bit overkill to use this for implicit derivatives as well,
1733 * no way this is worth the cost in practice, but seems to be the
1734 * only way for getting accurate and per-pixel lod values.
1736 LLVMValueRef ima
, imahalf
, tmp
, ddx
[3], ddy
[3];
1737 LLVMValueRef madx
, mady
, madxdivma
, madydivma
;
1738 LLVMValueRef sdxi
, tdxi
, rdxi
, sdyi
, tdyi
, rdyi
;
1739 LLVMValueRef tdxnegi
, rdxnegi
, tdynegi
, rdynegi
;
1740 LLVMValueRef sdxnewx
, sdxnewy
, sdxnewz
, tdxnewx
, tdxnewy
, tdxnewz
;
1741 LLVMValueRef sdynewx
, sdynewy
, sdynewz
, tdynewx
, tdynewy
, tdynewz
;
1742 LLVMValueRef face_sdx
, face_tdx
, face_sdy
, face_tdy
;
1744 * s = 1/2 * ( sc / ma + 1)
1745 * t = 1/2 * ( tc / ma + 1)
1747 * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
1748 * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
1750 * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
1751 * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
1752 * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
1753 * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
      /* select ma, calculate ima */
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
      signmabit = LLVMBuildAnd(builder, mai, signmask, "");
      ima = lp_build_div(coord_bld, coord_bld->one, ma);
      imahalf = lp_build_mul(coord_bld, posHalf, ima);
      imahalfpos = lp_build_abs(coord_bld, imahalf);
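      /*
       * Note: ima = 1/ma and imahalf = 0.5/ma keep ma's sign and feed the
       * derivative math below, while imahalfpos = |0.5/ma| is what the
       * final s/t projection uses.
       */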
      if (!derivs_in) {
         ddx[0] = lp_build_ddx(coord_bld, s);
         ddx[1] = lp_build_ddx(coord_bld, t);
         ddx[2] = lp_build_ddx(coord_bld, r);
         ddy[0] = lp_build_ddy(coord_bld, s);
         ddy[1] = lp_build_ddy(coord_bld, t);
         ddy[2] = lp_build_ddy(coord_bld, r);
      }
      else {
         ddx[0] = derivs_in->ddx[0];
         ddx[1] = derivs_in->ddx[1];
         ddx[2] = derivs_in->ddx[2];
         ddy[0] = derivs_in->ddy[0];
         ddy[1] = derivs_in->ddy[1];
         ddy[2] = derivs_in->ddy[2];
      }
      /* select major derivatives */
      madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at,
                              ddx[0], ddx[1], ddx[2]);
      mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at,
                              ddy[0], ddy[1], ddy[2]);
      si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
      ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
      ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");

      sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
      tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
      rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");

      sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
      tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
      rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");
      /*
       * compute all possible new s/t coords, which does the mirroring,
       * and do the same for derivs minor axes.
       * snewx = signma * -r;
       * tnewx = -t;
       * snewy = s;
       * tnewy = signma * r;
       * snewz = signma * s;
       * tnewz = -t;
       */
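      /*
       * All of this happens in the integer domain: XOR-ing the IEEE sign
       * bit negates a float, and XOR-ing in ma's sign bit multiplies by
       * sign(ma), so the per-face mirroring needs no real multiplies.
       */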
      tnegi = LLVMBuildXor(builder, ti, signmask, "");
      rnegi = LLVMBuildXor(builder, ri, signmask, "");
      tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
      rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
      tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
      rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");
      snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
      tnewx = tnegi;
      sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
      tdxnewx = tdxnegi;
      sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
      tdynewx = tdynegi;

      snewy = si;
      tnewy = LLVMBuildXor(builder, signmabit, ri, "");
      sdxnewy = sdxi;
      tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
      sdynewy = sdyi;
      tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");

      snewz = LLVMBuildXor(builder, signmabit, si, "");
      tnewz = tnegi;
      sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
      tdxnewz = tdxnegi;
      sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
      tdynewz = tdynegi;
      /* select the mirrored values */
      face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                              facex, facey, facez);
      face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                snewx, snewy, snewz);
      face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                tnewx, tnewy, tnewz);
      face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                  sdxnewx, sdxnewy, sdxnewz);
      face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                  tdxnewx, tdxnewy, tdxnewz);
      face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                  sdynewx, sdynewy, sdynewz);
      face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                                  tdynewx, tdynewy, tdynewz);
      face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
      face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
      face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
      face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
      face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
      face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");
      /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
      madxdivma = lp_build_mul(coord_bld, madx, ima);
      tmp = lp_build_mul(coord_bld, madxdivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdx, tmp);
      derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madxdivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdx, tmp);
      derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
      madydivma = lp_build_mul(coord_bld, mady, ima);
      tmp = lp_build_mul(coord_bld, madydivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdy, tmp);
      derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madydivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdy, tmp);
      derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);
      signma = LLVMBuildLShr(builder, mai, signshift, "");
      coords[2] = LLVMBuildOr(builder, face, signma, "face");

      /* project coords */
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);

      coords[0] = lp_build_add(coord_bld, face_s, posHalf);
      coords[1] = lp_build_add(coord_bld, face_t, posHalf);

      return;
   }
   else if (need_derivs) {
      LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle01[] = { /* no-op swizzle */
         0, 1,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle23[] = {
         2, 3,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle02[] = {
         0, 2,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      /*
       * scale the s/t/r coords pre-select/mirror so we can calculate
       * "reasonable" derivs.
       */
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      s = lp_build_mul(coord_bld, s, imahalfpos);
      t = lp_build_mul(coord_bld, t, imahalfpos);
      r = lp_build_mul(coord_bld, r, imahalfpos);
      /*
       * This isn't quite the same as the "ordinary" (3d deriv) path since we
       * know the texture is square, which simplifies things (we can omit the
       * size mul which happens very early completely here and do it at the
       * end instead).
       * Also always do calculations according to GALLIVM_DEBUG_NO_RHO_APPROX
       * since the error can get quite big otherwise at edges.
       * (With no_rho_approx max error is sqrt(2) at edges, same as it is
       * without no_rho_approx for 2d textures, otherwise it would be factor 2.)
       */
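      /*
       * What the block below does: the packed ddx/ddy values are squared,
       * the s/t/r contributions are accumulated via the swizzled adds, and
       * rho is the larger of the two sums picked out by swizzle0/swizzle1.
       * Note no sqrt is taken here, so rho is still a squared quantity at
       * this point.
       */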
      ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
      ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);

      ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
      ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
      tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
      tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);

      rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
      rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
      tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
      *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
   }
   if (!need_derivs) {
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
   }
   mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
   signmabit = LLVMBuildAnd(builder, mai, signmask, "");

   si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
   ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
   ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
   /*
    * compute all possible new s/t coords, which does the mirroring
    * snewx = signma * -r;
    * tnewx = -t;
    * snewy = s;
    * tnewy = signma * r;
    * snewz = signma * s;
    * tnewz = -t;
    */
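   /*
    * Same sign-bit XOR trick as in the derivative path above: XOR with
    * signmask negates, XOR with ma's sign bit multiplies by sign(ma).
    */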
   tnegi = LLVMBuildXor(builder, ti, signmask, "");
   rnegi = LLVMBuildXor(builder, ri, signmask, "");

   snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
   tnewx = tnegi;

   snewy = si;
   tnewy = LLVMBuildXor(builder, signmabit, ri, "");

   snewz = LLVMBuildXor(builder, signmabit, si, "");
   tnewz = tnegi;
   /* select the mirrored values */
   face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                             snewx, snewy, snewz);
   face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                             tnewx, tnewy, tnewz);
   face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at,
                           facex, facey, facez);

   face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
   face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
   /* add +1 for neg face */
   /* XXX with AVX probably want to use another select here -
    * as long as we ensure vblendvps gets used we can actually
    * skip the comparison and just use sign as a "mask" directly.
    */
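   /*
    * signma is ma's sign bit shifted down to bit 0, i.e. 0 for a positive
    * and 1 for a negative major axis; OR-ing it into the face index turns
    * PIPE_TEX_FACE_POS_* into the matching NEG_* face (see the layout
    * asserts near the top).
    */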
   signma = LLVMBuildLShr(builder, mai, signshift, "");
   coords[2] = LLVMBuildOr(builder, face, signma, "face");

   /* project coords */
   if (!need_derivs) {
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
   }

   coords[0] = lp_build_add(coord_bld, face_s, posHalf);
   coords[1] = lp_build_add(coord_bld, face_t, posHalf);
}

/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
 *
 * @param coord         coordinate in pixels
 * @param stride        number of bytes between rows of successive pixel blocks
 * @param block_length  number of pixels in a pixel block along the coordinate
 *                      axis
 * @param out_offset    resulting relative offset of the pixel block in bytes
 * @param out_subcoord  resulting sub-block pixel coordinate
 */
void
lp_build_sample_partial_offset(struct lp_build_context *bld,
                               unsigned block_length,
                               LLVMValueRef coord,
                               LLVMValueRef stride,
                               LLVMValueRef *out_offset,
                               LLVMValueRef *out_subcoord)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef subcoord;

   if (block_length == 1) {
      subcoord = bld->zero;
   }
   else {
      /*
       * Pixel blocks have power of two dimensions. LLVM should convert the
       * rem/div to bit arithmetic.
       * TODO: Verify this.
       * It does indeed, but it transforms the code to scalar (and back) when
       * doing so (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
       * The generated code looks seriously unfunny and is quite expensive.
       */
#if 0
      LLVMValueRef block_width = lp_build_const_int_vec(bld->gallivm, bld->type, block_length);
      subcoord = LLVMBuildURem(builder, coord, block_width, "");
      coord    = LLVMBuildUDiv(builder, coord, block_width, "");
#else
      unsigned logbase2 = util_logbase2(block_length);
      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
      coord    = LLVMBuildLShr(builder, coord, block_shift, "");
#endif
   }
   offset = lp_build_mul(bld, coord, stride);

   assert(out_offset);
   assert(out_subcoord);

   *out_offset = offset;
   *out_subcoord = subcoord;
}

/**
 * Compute the offset of a pixel block.
 *
 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
 *
 * Returns the relative offset and i,j sub-block coordinates
 */
void
lp_build_sample_offset(struct lp_build_context *bld,
                       const struct util_format_description *format_desc,
                       LLVMValueRef x,
                       LLVMValueRef y,
                       LLVMValueRef z,
                       LLVMValueRef y_stride,
                       LLVMValueRef z_stride,
                       LLVMValueRef *out_offset,
                       LLVMValueRef *out_i,
                       LLVMValueRef *out_j)
{
   LLVMValueRef x_stride;
   LLVMValueRef offset;

   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
                                 format_desc->block.bits/8);
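
   /*
    * Overall: offset = (x / block.width)  * x_stride
    *                 + (y / block.height) * y_stride
    *                 + z * z_stride
    * with the sub-block coordinates i = x % block.width and
    * j = y % block.height returned separately.
    */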
   lp_build_sample_partial_offset(bld,
                                  format_desc->block.width,
                                  x, x_stride,
                                  &offset, out_i);

   if (y && y_stride) {
      LLVMValueRef y_offset;
      lp_build_sample_partial_offset(bld,
                                     format_desc->block.height,
                                     y, y_stride,
                                     &y_offset, out_j);
      offset = lp_build_add(bld, offset, y_offset);
   }
   else {
      *out_j = bld->zero;
   }
   if (z && z_stride) {
      LLVMValueRef z_offset;
      LLVMValueRef k;
      lp_build_sample_partial_offset(bld,
                                     1, /* pixel blocks are always 2D */
                                     z, z_stride,
                                     &z_offset, &k);
      offset = lp_build_add(bld, offset, z_offset);
   }

   *out_offset = offset;