gallivm: handle explicit derivatives for cubemaps
[mesa.git] / src/gallium/auxiliary/gallivm/lp_bld_sample.c
/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Texture sampling -- common code.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_math.h"
#include "lp_bld_arit.h"
#include "lp_bld_const.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "lp_bld_flow.h"
#include "lp_bld_sample.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_type.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_quad.h"
#include "lp_bld_bitarit.h"

/*
 * Bri-linear factor. Should be greater than one.
 */
#define BRILINEAR_FACTOR 2

/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}
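
/*
 * For instance, PIPE_TEX_WRAP_CLAMP with a LINEAR filter can sample the
 * border color: the filter footprint of a coordinate clamped to the
 * [0, size] range may still extend past the last texel row/column, while
 * with NEAREST filtering the clamped coordinate always resolves to an
 * edge texel.
 */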


/**
 * Initialize the lp_static_texture_state object with the gallium
 * texture/sampler_view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state(struct lp_static_texture_state *state,
                                const struct pipe_sampler_view *view)
{
   const struct pipe_resource *texture;

   memset(state, 0, sizeof *state);

   if (!view || !view->texture)
      return;

   texture = view->texture;

   state->format = view->format;
   state->swizzle_r = view->swizzle_r;
   state->swizzle_g = view->swizzle_g;
   state->swizzle_b = view->swizzle_b;
   state->swizzle_a = view->swizzle_a;

   state->target = texture->target;
   state->pot_width = util_is_power_of_two(texture->width0);
   state->pot_height = util_is_power_of_two(texture->height0);
   state->pot_depth = util_is_power_of_two(texture->depth0);
   state->level_zero_only = !view->u.tex.last_level;

   /*
    * The layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}


/**
 * Initialize the lp_static_sampler_state object with the gallium sampler
 * state (this contains the parts which are considered static).
 */
void
lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
                                const struct pipe_sampler_state *sampler)
{
   memset(state, 0, sizeof *state);

   if (!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally the state tracker or cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more that can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->wrap_s = sampler->wrap_s;
   state->wrap_t = sampler->wrap_t;
   state->wrap_r = sampler->wrap_r;
   state->min_img_filter = sampler->min_img_filter;
   state->mag_img_filter = sampler->mag_img_filter;
   state->seamless_cube_map = sampler->seamless_cube_map;

   if (sampler->max_lod > 0.0f) {
      state->min_mip_filter = sampler->min_mip_filter;
   } else {
      state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
       state->min_img_filter != state->mag_img_filter) {
      if (sampler->lod_bias != 0.0f) {
         state->lod_bias_non_zero = 1;
      }

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         /*
          * XXX this won't do anything with the mesa state tracker, which
          * never sets max_lod higher than the number of actually present
          * mip levels...
          */
         if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;
}
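
/*
 * Usage sketch (illustrative only, not code from this file): a driver
 * typically snapshots both static states into its shader key when looking
 * up or generating a sampler function variant:
 *
 *    struct lp_static_texture_state tex_state;
 *    struct lp_static_sampler_state samp_state;
 *    lp_sampler_static_texture_state(&tex_state, view);
 *    lp_sampler_static_sampler_state(&samp_state, sampler_cso);
 *    // memcmp()-compare against cached keys; recompile on mismatch
 */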


/**
 * Generate code to compute the coordinate gradient (rho).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho has bld->levelf format (per quad or per element).
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned texture_unit,
             LLVMValueRef s,
             LLVMValueRef t,
             LLVMValueRef r,
             LLVMValueRef cube_rho,
             const struct lp_derivatives *derivs)
{
   struct gallivm_state *gallivm = bld->gallivm;
   struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *rho_bld = &bld->lodf_bld;
   const unsigned dims = bld->dims;
   LLVMValueRef ddx_ddy[2];
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;
   unsigned length = coord_bld->type.length;
   unsigned num_quads = length / 4;
   boolean rho_per_quad = rho_bld->type.length != length;
   boolean no_rho_opt = (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1);
   unsigned i;
   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
   LLVMValueRef rho_xvec, rho_yvec;

   /* Note that all simplified calculations will only work for isotropic filtering */

   /*
    * rho calculations are always per quad, except when explicit derivs
    * (excluding the messy cube map case for now) request per-element lod.
    */

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   if (cube_rho) {
      LLVMValueRef cubesize;
      LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);

      /*
       * The cube map code already did everything except the size mul and
       * the per-quad extraction. Luckily cube map faces are always square!
       */
      if (rho_per_quad) {
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, cube_rho, 0);
      }
      else {
         rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
      }
      /* Could optimize this for the single quad case by skipping the broadcast */
      cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                            rho_bld->type, float_size, index0);
      /* skipping sqrt hence returning rho squared */
      cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
      rho = lp_build_mul(rho_bld, cubesize, rho);
   }
   else if (derivs) {
      LLVMValueRef ddmax[3], ddx[3], ddy[3];
      for (i = 0; i < dims; i++) {
         LLVMValueRef floatdim;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);

         floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                               coord_bld->type, float_size, indexi);

         /*
          * Note that for the rho_per_quad case we could reduce the math
          * (at some shuffle cost), but for now use the same code as in
          * the per-pixel lod case.
          */
         if (no_rho_opt) {
            ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
            ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
         }
         else {
            LLVMValueRef tmpx, tmpy;
            tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
            tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
            ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
            ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
         }
      }
      if (no_rho_opt) {
         rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
         rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
         if (dims > 2) {
            rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
            rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
         }
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
         /* skipping sqrt hence returning rho squared */
      }
      else {
         rho = ddmax[0];
         if (dims > 1) {
            rho = lp_build_max(coord_bld, rho, ddmax[1]);
            if (dims > 2) {
               rho = lp_build_max(coord_bld, rho, ddmax[2]);
            }
         }
      }
      if (rho_per_quad) {
         /*
          * rho_vec contains per-pixel rho, convert to scalar per quad.
          */
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, rho, 0);
      }
   }
   else {
      /*
       * This all looks a bit complex, but it's not that bad
       * (the shuffle code makes it look worse than it is).
       * Still, it might not be ideal for all cases.
       */
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle2[] = {
         2, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      if (dims < 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
      }
      else if (dims >= 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
         }
      }

      if (no_rho_opt) {
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];

         for (i = 0; i < num_quads; i++) {
            shuffles[i*4+0] = shuffles[i*4+1] = index0;
            shuffles[i*4+2] = shuffles[i*4+3] = index1;
         }
         floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
                                           LLVMConstVector(shuffles, length), "");
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);

         if (dims > 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                                  coord_bld->type, float_size, index2);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
            ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
            rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
         }

         rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (rho_per_quad) {
            rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                            rho_bld->type, rho, 0);
         }
         else {
            rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
         }
         /* skipping sqrt hence returning rho squared */
      }
      else {
         ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
         }

         if (dims < 2) {
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
         }
         else if (dims == 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            static const unsigned char swizzle13[] = {
               1, 3,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
         }
         else {
            LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
            assert(dims == 3);
            for (i = 0; i < num_quads; i++) {
               shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
               shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
               shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
               shuffles1[4*i + 3] = i32undef;
               shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
               shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
               shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
               shuffles2[4*i + 3] = i32undef;
            }
            rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles1, length), "");
            rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles2, length), "");
         }

         rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (bld->coord_type.length > 4) {
            /* expand size to each quad */
            if (dims > 1) {
               /* could use some broadcast_vector helper for this? */
               LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
               for (i = 0; i < num_quads; i++) {
                  src[i] = float_size;
               }
               float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
            }
            else {
               float_size = lp_build_broadcast_scalar(coord_bld, float_size);
            }
            rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
                  rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);

                  rho = lp_build_max(coord_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
                     rho = lp_build_max(coord_bld, rho, rho_r);
                  }
               }
            }
            if (rho_per_quad) {
               rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                               rho_bld->type, rho, 0);
            }
            else {
               rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
            }
         }
         else {
            if (dims <= 1) {
               rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
            }
            rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
                  rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

                  rho = lp_build_max(float_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
                     rho = lp_build_max(float_bld, rho, rho_r);
                  }
               }
            }
            if (!rho_per_quad) {
               rho = lp_build_broadcast_scalar(rho_bld, rho);
            }
         }
      }
   }

   return rho;
}
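
/*
 * To summarize the math above (all under the usual isotropic
 * approximation, with per-axis texture size d_i and coord c_i):
 * the default path computes
 *
 *    rho = max_i(max(|d(c_i * d_i)/dx|, |d(c_i * d_i)/dy|))
 *
 * while the GALLIVM_DEBUG_NO_RHO_APPROX path computes the more accurate
 *
 *    rho^2 = max(sum_i(d(c_i * d_i)/dx)^2, sum_i(d(c_i * d_i)/dy)^2)
 *
 * deferring the sqrt to the caller's log2, where it becomes a cheap
 * multiply by 0.5 (the cube_rho path arrives here already computed).
 */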


/*
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 *
 *   1.0 -              /----*
 *                     /
 *                    /
 *                   /
 *   0.5 -          *
 *                 /
 *                /
 *               /
 *   0.0 - *----/
 *
 *         |                 |
 *        2^0               2^1
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when the texture is
 * known to have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
 */
static void
lp_build_brilinear_lod(struct lp_build_context *bld,
                       LLVMValueRef lod,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_fpart;
   double pre_offset = (factor - 0.5)/factor - 0.5;
   double post_offset = 1 - factor;

   if (0) {
      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
   }

   lod = lp_build_add(bld, lod,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));

   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * It's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_fpart = lod_fpart;

   if (0) {
      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
   }
}
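
/*
 * Worked example with the default factor == 2 (so pre_offset == 0.25 and
 * post_offset == -1):
 *
 *    lod = 0.0  -> ipart = 0, fpart = 2*0.25 - 1 = -0.5 (negative: nearest)
 *    lod = 0.5  -> ipart = 0, fpart = 2*0.75 - 1 =  0.5 (exact result)
 *    lod = 0.75 -> ipart = 1, fpart = 2*0.0  - 1 = -1.0 (snaps to level 1)
 */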


/*
 * Combined log2 and brilinear lod computation.
 *
 * It's essentially identical to calling lp_build_fast_log2() and
 * lp_build_brilinear_lod() above, but by combining the two we can compute
 * the integer and fractional part independently.
 */
static void
lp_build_brilinear_rho(struct lp_build_context *bld,
                       LLVMValueRef rho,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_ipart;
   LLVMValueRef lod_fpart;

   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
   const double post_offset = 1 - 2*factor;

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, rho));

   /*
    * The pre factor will make the intersections with the exact powers of two
    * happen precisely where we want them to be, which means that the integer
    * part will not need any post adjustments.
    */
   rho = lp_build_mul(bld, rho,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));

   /* ipart = ifloor(log2(rho)) */
   lod_ipart = lp_build_extract_exponent(bld, rho, 0);

   /* fpart = rho / 2**ipart */
   lod_fpart = lp_build_extract_mantissa(bld, rho);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * Like in lp_build_brilinear_lod, it's not necessary to clamp lod_fpart:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_ipart = lod_ipart;
   *out_lod_fpart = lod_fpart;
}
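
/*
 * For the default factor == 2 this gives pre_factor == 3.5/(2*sqrt(2))
 * ~= 1.237 and post_offset == -3; since the extracted mantissa lies in
 * [1, 2), lod_fpart ends up in [-1, 1), the same range produced by
 * lp_build_brilinear_lod().
 */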


/**
 * Fast implementation of iround(log2(sqrt(x))), based on
 * log2(x^n) == n*log2(x).
 *
 * Gives accurate results all the time.
 * (Could be trivially extended to handle other power-of-two roots.)
 */
static LLVMValueRef
lp_build_ilog2_sqrt(struct lp_build_context *bld,
                    LLVMValueRef x)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef ipart;
   struct lp_type i_type = lp_int_type(bld->type);
   LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, x));

   /* ipart = floor(log2(sqrt(x)) + 0.5) = floor(0.5*(log2(x) + 1.0)) */
   ipart = lp_build_extract_exponent(bld, x, 1);
   ipart = LLVMBuildAShr(builder, ipart, one, "");

   return ipart;
}
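
/*
 * Worked example: for x == 8 (i.e. rho == sqrt(8) ~= 2.83), the exponent
 * extracted with bias 1 is 3 + 1 == 4, and 4 >> 1 == 2, matching
 * iround(log2(2.83)) == iround(1.5) == 2.
 */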


/**
 * Generate code to compute texture level of detail (lambda).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param cube_rho  rho calculated by cube coord mapping (optional)
 * \param out_lod_ipart  integer part of lod
 * \param out_lod_fpart  float part of lod (never larger than 1 but may be negative)
 * \param out_lod_positive  (mask) if lod is positive (i.e. texture is minified)
 *
 * The resulting lod can be scalar per quad or be per element.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      unsigned texture_unit,
                      unsigned sampler_unit,
                      LLVMValueRef s,
                      LLVMValueRef t,
                      LLVMValueRef r,
                      LLVMValueRef cube_rho,
                      const struct lp_derivatives *derivs,
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart,
                      LLVMValueRef *out_lod_positive)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *lodf_bld = &bld->lodf_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->lodi_bld.zero;
   *out_lod_positive = bld->lodi_bld.zero;
   *out_lod_fpart = lodf_bld->zero;

   /*
    * For determining min/mag, we follow the GL 4.1 spec, 3.9.12 Texture
    * Magnification:
    * "Implementations may either unconditionally assume c = 0 for the
    * minification vs. magnification switch-over point, or may choose to
    * make c depend on the combination of minification and magnification
    * modes as follows: if the magnification filter is given by LINEAR and
    * the minification filter is given by NEAREST_MIPMAP_NEAREST or
    * NEAREST_MIPMAP_LINEAR, then c = 0.5. This is done to ensure that a
    * minified texture does not appear "sharper" than a magnified texture.
    * Otherwise c = 0."
    * And 3.9.11 Texture Minification:
    * "If lod is less than or equal to the constant c (see section 3.9.12)
    * the texture is said to be magnified; if it is greater, the texture is
    * minified."
    * So we always use 0 as the switchover point, and use magnification for
    * lod == 0. Note that the unconditional c = 0 behavior is new (first
    * appearing in the GL 3.1 spec); old GL versions required 0.5 for the
    * modes listed above. I have no clue about the (undocumented) wishes of
    * d3d9/d3d10 here!
    */

   if (bld->static_sampler_state->min_max_lod_equal) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         bld->dynamic_state->min_lod(bld->dynamic_state,
                                     bld->gallivm, sampler_unit);

      lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
   }
   else {
      if (explicit_lod) {
         if (bld->num_lods != bld->coord_type.length)
            lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                            lodf_bld->type, explicit_lod, 0);
         else
            lod = explicit_lod;
      }
      else {
         LLVMValueRef rho;
         boolean rho_squared = ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
                                (bld->dims > 1)) || cube_rho;

         rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias &&
             !bld->static_sampler_state->lod_bias_non_zero &&
             !bld->static_sampler_state->apply_max_lod &&
             !bld->static_sampler_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions by keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               /*
                * We don't actually need both values all the time: lod_ipart
                * is needed for the nearest mipfilter, lod_positive only if
                * min != mag.
                */
               if (rho_squared) {
                  *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
               }
               else {
                  *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
               }
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR) &&
                !rho_squared) {
               /*
                * This can't work if rho is squared. Not sure if it could be
                * fixed while keeping it worthwhile; we could also do a sqrt
                * here, but brilinear plus no_rho_opt seems like a combination
                * that doesn't make much sense anyway, so just use the
                * ordinary path below.
                */
               lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
         }

         if (0) {
            lod = lp_build_log2(lodf_bld, rho);
         }
         else {
            lod = lp_build_fast_log2(lodf_bld, rho);
         }
         if (rho_squared) {
            /* log2(x) == 0.5*log2(x^2) */
            lod = lp_build_mul(lodf_bld, lod,
                               lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));
         }

         /* add shader lod bias */
         if (lod_bias) {
            if (bld->num_lods != bld->coord_type.length)
               lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                                    lodf_bld->type, lod_bias, 0);
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_sampler_state->lod_bias_non_zero) {
         LLVMValueRef sampler_lod_bias =
            bld->dynamic_state->lod_bias(bld->dynamic_state,
                                         bld->gallivm, sampler_unit);
         sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
                                                      sampler_lod_bias);
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      }

      /* clamp lod */
      if (bld->static_sampler_state->apply_max_lod) {
         LLVMValueRef max_lod =
            bld->dynamic_state->max_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);

         lod = lp_build_min(lodf_bld, lod, max_lod);
      }
      if (bld->static_sampler_state->apply_min_lod) {
         LLVMValueRef min_lod =
            bld->dynamic_state->min_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);

         lod = lp_build_max(lodf_bld, lod, min_lod);
      }
   }

   *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                    lod, lodf_bld->zero);

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
         lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(lodf_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");

   return;
}


/**
 * For PIPE_TEX_MIPFILTER_NEAREST, convert the integer part of the lod
 * to the actual mip level.
 * Note: this is all scalar per quad code.
 * \param lod_ipart  int texture level of detail
 * \param level_out  returns the integer mipmap level
 * \param out_of_bounds  returns the per coord out_of_bounds mask if provided
 */
void
lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *level_out,
                           LLVMValueRef *out_of_bounds)
{
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   LLVMValueRef first_level, last_level, level;

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   level = lp_build_add(leveli_bld, lod_ipart, first_level);

   if (out_of_bounds) {
      LLVMValueRef out, out1;
      out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(leveli_bld, out, out1);
      if (bld->num_mips == bld->coord_bld.type.length) {
         *out_of_bounds = out;
      }
      else if (bld->num_mips == 1) {
         *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
      }
      else {
         assert(bld->num_mips == bld->coord_bld.type.length / 4);
         *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                leveli_bld->type,
                                                                bld->int_coord_bld.type,
                                                                out);
      }
      *level_out = level;
   }
   else {
      /* clamp level to the legal range of levels */
      *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
   }
}
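
/*
 * E.g. with first_level == 0 and last_level == 3, lod_ipart == -1 yields
 * level -1, which is either clamped to 0 or, when the caller passed an
 * out_of_bounds pointer, returned as-is with the mask flagging it.
 */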


/**
 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
 * to two (per-quad) (adjacent) mipmap level indexes, and fix up the float lod
 * part accordingly.
 * Later, we'll sample from those two mipmap levels and interpolate between them.
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_build_context *levelf_bld = &bld->levelf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   assert(bld->num_lods == bld->num_mips);

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level],
    * with the minimum number of comparisons, zeroing lod_fpart at the
    * extreme ends in the process.
    */

   /*
    * This code (the vector select in particular) only works with llvm 3.1
    * or later (if there's more than one quad, with the x86 backend). Might
    * consider converting to our lp_bld_logic helpers.
    */
#if HAVE_LLVM < 0x0301
   assert(leveli_bld->type.length == 1);
#endif

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
   lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
   lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
}
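
/*
 * E.g. with first_level == 0 and last_level == 3: lod_ipart == 2 gives
 * level0 == 2 and level1 == 3 with lod_fpart untouched, while
 * lod_ipart == 3 gives level0 == level1 == 3 with lod_fpart zeroed, so
 * the subsequent linear blend degenerates into a single-level lookup.
 */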


/**
 * Return pointer to a single mipmap level.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr, mip_offset;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
   mip_offset = LLVMBuildLoad(builder, mip_offset, "");
   data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
   return data_ptr;
}

/**
 * Return (per-pixel) offsets to mip levels.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
                         LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], offsets, offset1;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
      offset1 = LLVMBuildLoad(builder, offset1, "");
      offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      unsigned i;

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
      }
      offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
   }
   else {
      unsigned i;

      assert(bld->num_mips == bld->coord_bld.type.length);

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
      }
   }
   return offsets;
}


/**
 * Codegen equivalent for u_minify().
 * Return max(1, base_size >> level);
 */
LLVMValueRef
lp_build_minify(struct lp_build_context *bld,
                LLVMValueRef base_size,
                LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   assert(lp_check_value(bld->type, base_size));
   assert(lp_check_value(bld->type, level));

   if (level == bld->zero) {
      /* if we're using mipmap level zero, no minification is needed */
      return base_size;
   }
   else {
      LLVMValueRef size =
         LLVMBuildLShr(builder, base_size, level, "minify");
      assert(bld->type.sign);
      size = lp_build_max(bld, size, bld->one);
      return size;
   }
}
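
/*
 * Scalar sketch of the semantics above (mirroring u_minify() from the
 * gallium utils, shown only for clarity):
 *
 *    static inline unsigned
 *    minify(unsigned base_size, unsigned level)
 *    {
 *       return MAX2(base_size >> level, 1);
 *    }
 */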


/**
 * Dereference stride_array[mipmap_level] array to get a stride.
 * Return stride as a vector.
 */
static LLVMValueRef
lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
                              LLVMValueRef stride_array, LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], stride, stride1;
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
      stride1 = LLVMBuildLoad(builder, stride1, "");
      stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      LLVMValueRef stride1;
      unsigned i;

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
      }
      stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
   }
   else {
      LLVMValueRef stride1;
      unsigned i;

      assert(bld->num_mips == bld->coord_bld.type.length);

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->coord_bld.type.length; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
      }
   }
   return stride;
}


/**
 * When sampling a mipmap, we need to compute the width, height, depth
 * of the source levels from the level indexes. This helper function
 * does that.
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   if (bld->num_mips == 1) {
      ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
      *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
   }
   else {
      LLVMValueRef int_size_vec;
      LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
      unsigned num_quads = bld->coord_bld.type.length / 4;
      unsigned i;

      if (bld->num_mips == num_quads) {
         /*
          * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
          * Intel "forgot" the variable shift count instruction until avx2.
          * A harmless 8x32 shift gets translated into 32 instructions
          * (16 extracts, 8 scalar shifts, 8 inserts); llvm is apparently
          * unable to recognize that there are really just 2 different shift
          * count values. So do the shift 4-wide before expansion.
          */
         struct lp_build_context bld4;
         struct lp_type type4;

         type4 = bld->int_coord_bld.type;
         type4.length = 4;

         lp_build_context_init(&bld4, bld->gallivm, type4);

         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld4,
                                                     bld->int_size);
         }
         else {
            assert(bld->int_size_in_bld.type.length == 4);
            int_size_vec = bld->int_size;
         }

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef ileveli;
            LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);

            ileveli = lp_build_extract_broadcast(bld->gallivm,
                                                 bld->leveli_bld.type,
                                                 bld4.type,
                                                 ilevel,
                                                 indexi);
            tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
         }
         /*
          * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
          * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
          */
         *out_size = lp_build_concat(bld->gallivm,
                                     tmp,
                                     bld4.type,
                                     num_quads);
      }
      else {
         /* FIXME: this is terrible and results in _huge_ vectors
          * (for the dims > 1 case).
          * Should refactor this (together with extract_image_sizes) and do
          * something more useful. We could, for instance, if we have width
          * and height in a 4-wide vector, pack all elements into an 8xi16
          * vector (on which we can still do useful math) instead of using
          * a 16xi32 vector.
          * FIXME: some callers can't handle this yet.
          * For dims == 1 this will create a [w0, w1, w2, w3, ...] vector.
          * For dims > 1 this will create a [w0, h0, d0, _, w1, h1, d1, _, ...]
          * vector.
          */
         assert(bld->num_mips == bld->coord_bld.type.length);
         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
                                                     bld->int_size);
            /* vector shift with variable shift count alert... */
            *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
         }
         else {
            LLVMValueRef ilevel1;
            for (i = 0; i < bld->num_mips; i++) {
               LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
               ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
                                                    bld->int_size_in_bld.type, ilevel, indexi);
               tmp[i] = bld->int_size;
               tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
            }
            *out_size = lp_build_concat(bld->gallivm, tmp,
                                        bld->int_size_in_bld.type,
                                        bld->num_mips);
         }
      }
   }

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
   }
   if (dims == 3 ||
       bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
       bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
       bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
      *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->img_stride_array,
                                                      ilevel);
   }
}


/**
 * Extract and broadcast texture size.
 *
 * @param size_bld    build context matching the texture size vector (either
 *                    bld->int_size_bld or bld->float_size_bld)
 * @param coord_type  type of the coord vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size        vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_build_context *size_bld,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   struct lp_type size_type = size_bld->type;

   if (bld->num_mips == 1) {
      *out_width = lp_build_extract_broadcast(bld->gallivm,
                                              size_type,
                                              coord_type,
                                              size,
                                              LLVMConstInt(i32t, 0, 0));
      if (dims >= 2) {
         *out_height = lp_build_extract_broadcast(bld->gallivm,
                                                  size_type,
                                                  coord_type,
                                                  size,
                                                  LLVMConstInt(i32t, 1, 0));
         if (dims == 3) {
            *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                    size_type,
                                                    coord_type,
                                                    size,
                                                    LLVMConstInt(i32t, 2, 0));
         }
      }
   }
   else {
      unsigned num_quads = bld->coord_bld.type.length / 4;

      if (dims == 1) {
         *out_width = size;
      }
      else if (bld->num_mips == num_quads) {
         *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
         if (dims >= 2) {
            *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
            if (dims == 3) {
               *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
            }
         }
      }
      else {
         assert(bld->num_mips == bld->coord_type.length);
         *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                coord_type, size, 0);
         if (dims >= 2) {
            *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                    coord_type, size, 1);
            if (dims == 3) {
               *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                      coord_type, size, 2);
            }
         }
      }
   }
}


/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the texture size as floats (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height;
   LLVMValueRef depth;

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = +0.5 / abs(coord); */
   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
   return ima;
}

/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = -0.5 / abs(coord); */
   LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
   return ima;
}
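
/*
 * With ima == +-0.5 / |ma|, the per-face coords built below reduce to the
 * usual cube mapping formulas s = 0.5*sc/|ma| + 0.5 and
 * t = 0.5*tc/|ma| + 0.5, with the sign flips folded into the choice of
 * imapos/imaneg.
 */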

/**
 * Helper used by lp_build_cube_lookup()
 * FIXME: the sign here can also be 0.
 * Arithmetically this could definitely make a difference. Either
 * fix the comment or use another (simpler) sign function, not sure
 * which one it should be.
 * \param sign  scalar +1 or -1
 * \param coord  float vector
 * \param ima  float vector
 */
static LLVMValueRef
lp_build_cube_coord(struct lp_build_context *coord_bld,
                    LLVMValueRef sign, int negate_coord,
                    LLVMValueRef coord, LLVMValueRef ima)
{
   /* return negate(coord) * ima * sign + 0.5; */
   LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef res;

   assert(negate_coord == +1 || negate_coord == -1);

   if (negate_coord == -1) {
      coord = lp_build_negate(coord_bld, coord);
   }

   res = lp_build_mul(coord_bld, coord, ima);
   if (sign) {
      sign = lp_build_broadcast_scalar(coord_bld, sign);
      res = lp_build_mul(coord_bld, res, sign);
   }
   res = lp_build_add(coord_bld, res, half);

   return res;
}


/** Helper used by lp_build_cube_lookup()
 * Return (major_coord >= 0) ? pos_face : neg_face;
 */
static LLVMValueRef
lp_build_cube_face(struct lp_build_sample_context *bld,
                   LLVMValueRef major_coord,
                   unsigned pos_face, unsigned neg_face)
{
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
                                    major_coord,
                                    bld->float_bld.zero, "");
   LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
   LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
   LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
   return res;
}


/** Helper for doing 3-wise selection.
 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
 */
static LLVMValueRef
lp_build_select3(struct lp_build_context *sel_bld,
                 LLVMValueRef sel0,
                 LLVMValueRef sel1,
                 LLVMValueRef val0,
                 LLVMValueRef val1,
                 LLVMValueRef val2)
{
   LLVMValueRef tmp;
   tmp = lp_build_select(sel_bld, sel0, val0, val1);
   return lp_build_select(sel_bld, sel1, val2, tmp);
}
1499
1500 /**
1501 * Generate code to do cube face selection and compute per-face texcoords.
1502 */
1503 void
1504 lp_build_cube_lookup(struct lp_build_sample_context *bld,
1505 LLVMValueRef *coords,
1506 const struct lp_derivatives *derivs_in, /* optional */
1507 LLVMValueRef *rho,
1508 struct lp_derivatives *derivs_out, /* optional */
1509 boolean need_derivs)
1510 {
1511 struct lp_build_context *coord_bld = &bld->coord_bld;
1512 LLVMBuilderRef builder = bld->gallivm->builder;
1513 struct gallivm_state *gallivm = bld->gallivm;
1514 LLVMValueRef si, ti, ri;
1515
1516 if (1 || coord_bld->type.length > 4) {
1517 /*
1518 * Do per-pixel face selection. We cannot however (as we used to do)
1519 * simply calculate the derivs afterwards (which is very bogus for
1520 * explicit derivs btw) because the values would be "random" when
1521 * not all pixels lie on the same face. So what we do here is just
1522 * calculate the derivatives after scaling the coords by the absolute
1523 * value of the inverse major axis, and essentially do rho calculation
1524 * steps as if it were a 3d texture. This is perfect if all pixels hit
1525 * the same face, but not so great at edges, I believe the max error
1526 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1527 * the 3d distance between 2 points on the cube instead of measuring up/down
1528 * the edge). Still this is possibly a win over just selecting the same face
1529 * for all pixels. Unfortunately, something like that doesn't work for
1530 * explicit derivatives.
1531 */
1532 struct lp_build_context *cint_bld = &bld->int_coord_bld;
1533 struct lp_type intctype = cint_bld->type;
1534 LLVMTypeRef coord_vec_type = coord_bld->vec_type;
1535 LLVMTypeRef cint_vec_type = cint_bld->vec_type;
1536 LLVMValueRef as, at, ar, face, face_s, face_t;
1537 LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
1538 LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1539 LLVMValueRef tnegi, rnegi;
1540 LLVMValueRef ma, mai, signma, signmabit, imahalfpos;
1541 LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1542 LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1543 1 << (intctype.width - 1));
1544 LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1545 intctype.width -1);
1546 LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1547 LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1548 LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1549 LLVMValueRef s = coords[0];
1550 LLVMValueRef t = coords[1];
1551 LLVMValueRef r = coords[2];
1552
1553 assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1554 assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1555 assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1556
1557 /*
1558 * get absolute value (for x/y/z face selection) and sign bit
1559 * (for mirroring minor coords and pos/neg face selection)
1560 * of the original coords.
1561 */
1562 as = lp_build_abs(&bld->coord_bld, s);
1563 at = lp_build_abs(&bld->coord_bld, t);
1564 ar = lp_build_abs(&bld->coord_bld, r);
1565
1566 /*
1567 * major face determination: select x if x > y else select y
1568 * select z if z >= max(x,y) else select previous result
1569 * if some axis are the same we chose z over y, y over x - the
1570 * dx10 spec seems to ask for it while OpenGL doesn't care (if we
1571 * wouldn't care could save a select or two if using different
1572 * compares and doing at_g_as_ar last since tnewx and tnewz are the
1573 * same).
1574 */
1575 as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
1576 maxasat = lp_build_max(coord_bld, as, at);
1577 ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
1578
1579 if (need_derivs && (derivs_in ||
1580 ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
1581 (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX)))) {
1582 /*
1583 * XXX: This is really really complex.
1584 * It is a bit overkill to use this for implicit derivatives as well,
1585 * no way this is worth the cost in practice, but seems to be the
1586 * only way for getting accurate and per-pixel lod values.
1587 */
1588 LLVMValueRef ima, imahalf, tmp, ddx[3], ddy[3];
1589 LLVMValueRef madx, mady, madxdivma, madydivma;
1590 LLVMValueRef sdxi, tdxi, rdxi, sdyi, tdyi, rdyi;
1591 LLVMValueRef tdxnegi, rdxnegi, tdynegi, rdynegi;
1592 LLVMValueRef sdxnewx, sdxnewy, sdxnewz, tdxnewx, tdxnewy, tdxnewz;
1593 LLVMValueRef sdynewx, sdynewy, sdynewz, tdynewx, tdynewy, tdynewz;
1594 LLVMValueRef face_sdx, face_tdx, face_sdy, face_tdy;
1595 /*
1596 * s = 1/2 * ( sc / ma + 1)
1597 * t = 1/2 * ( tc / ma + 1)
1598 *
1599 * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
1600 * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
1601 *
1602 * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
1603 * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
1604 * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
1605 * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
1606 */
1607
1608 /* select ma, calculate ima */
1609 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1610 mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
1611 signmabit = LLVMBuildAnd(builder, mai, signmask, "");
1612 ima = lp_build_div(coord_bld, coord_bld->one, ma);
1613 imahalf = lp_build_mul(coord_bld, posHalf, ima);
1614 imahalfpos = lp_build_abs(coord_bld, imahalf);
1615
1616 if (!derivs_in) {
1617 ddx[0] = lp_build_ddx(coord_bld, s);
1618 ddx[1] = lp_build_ddx(coord_bld, t);
1619 ddx[2] = lp_build_ddx(coord_bld, r);
1620 ddy[0] = lp_build_ddy(coord_bld, s);
1621 ddy[1] = lp_build_ddy(coord_bld, t);
1622 ddy[2] = lp_build_ddy(coord_bld, r);
1623 }
1624 else {
1625 ddx[0] = derivs_in->ddx[0];
1626 ddx[1] = derivs_in->ddx[1];
1627 ddx[2] = derivs_in->ddx[2];
1628 ddy[0] = derivs_in->ddy[0];
1629 ddy[1] = derivs_in->ddy[1];
1630 ddy[2] = derivs_in->ddy[2];
1631 }
1632
1633 /* select major derivatives */
1634 madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddx[0], ddx[1], ddx[2]);
1635 mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddy[0], ddy[1], ddy[2]);
1636
1637 si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
1638 ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
1639 ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
1640
1641 sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
1642 tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
1643 rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");
1644
1645 sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
1646 tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
1647 rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");
1648
1649 /*
1650 * compute all possible new s/t coords, which does the mirroring,
1651 * and do the same for the minor-axis derivatives.
1652 * snewx = signma * -r;
1653 * tnewx = -t;
1654 * snewy = s;
1655 * tnewy = signma * r;
1656 * snewz = signma * s;
1657 * tnewz = -t;
1658 */
1659 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1660 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1661 tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
1662 rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
1663 tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
1664 rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");
1665
1666 snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
1667 tnewx = tnegi;
1668 sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
1669 tdxnewx = tdxnegi;
1670 sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
1671 tdynewx = tdynegi;
1672
1673 snewy = si;
1674 tnewy = LLVMBuildXor(builder, signmabit, ri, "");
1675 sdxnewy = sdxi;
1676 tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
1677 sdynewy = sdyi;
1678 tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");
1679
1680 snewz = LLVMBuildXor(builder, signmabit, si, "");
1681 tnewz = tnegi;
1682 sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
1683 tdxnewz = tdxnegi;
1684 sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
1685 tdynewz = tdynegi;
1686
1687 /* select the mirrored values */
1688 face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
1689 face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
1690 face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
1691 face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdxnewx, sdxnewy, sdxnewz);
1692 face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdxnewx, tdxnewy, tdxnewz);
1693 face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdynewx, sdynewy, sdynewz);
1694 face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdynewx, tdynewy, tdynewz);
1695
1696 face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
1697 face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
1698 face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
1699 face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
1700 face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
1701 face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");
1702
1703 /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
1704 madxdivma = lp_build_mul(coord_bld, madx, ima);
1705 tmp = lp_build_mul(coord_bld, madxdivma, face_s);
1706 tmp = lp_build_sub(coord_bld, face_sdx, tmp);
1707 derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);
1708
1709 /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
1710 tmp = lp_build_mul(coord_bld, madxdivma, face_t);
1711 tmp = lp_build_sub(coord_bld, face_tdx, tmp);
1712 derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);
1713
1714 /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
1715 madydivma = lp_build_mul(coord_bld, mady, ima);
1716 tmp = lp_build_mul(coord_bld, madydivma, face_s);
1717 tmp = lp_build_sub(coord_bld, face_sdy, tmp);
1718 derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);
1719
1720 /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
1721 tmp = lp_build_mul(coord_bld, madydivma, face_t);
1722 tmp = lp_build_sub(coord_bld, face_tdy, tmp);
1723 derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);
1724
1725 signma = LLVMBuildLShr(builder, mai, signshift, "");
1726 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1727
1728 /* project coords */
1729 face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
1730 face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
1731
1732 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
1733 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
1734
1735 return;
1736 }
1737
1738 else if (need_derivs) {
1739 LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
1740 static const unsigned char swizzle0[] = { /* no-op swizzle */
1741 0, LP_BLD_SWIZZLE_DONTCARE,
1742 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1743 };
1744 static const unsigned char swizzle1[] = {
1745 1, LP_BLD_SWIZZLE_DONTCARE,
1746 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1747 };
1748 static const unsigned char swizzle01[] = { /* no-op swizzle */
1749 0, 1,
1750 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1751 };
1752 static const unsigned char swizzle23[] = {
1753 2, 3,
1754 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1755 };
1756 static const unsigned char swizzle02[] = {
1757 0, 2,
1758 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1759 };
1760
1761 /*
1762 * scale the s/t/r coords pre-select/mirror so we can calculate
1763 * "reasonable" derivs.
1764 */
1765 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1766 imahalfpos = lp_build_cube_imapos(coord_bld, ma);
1767 s = lp_build_mul(coord_bld, s, imahalfpos);
1768 t = lp_build_mul(coord_bld, t, imahalfpos);
1769 r = lp_build_mul(coord_bld, r, imahalfpos);
1770
1771 /*
1772 * This isn't quite the same as the "ordinary" (3d deriv) path since we
1773 * know the texture is square, which simplifies things (we can
1774 * completely omit the size mul, which normally happens very early,
1775 * and do it at the very end instead).
1776 * Also always do the calculations as if GALLIVM_DEBUG_NO_RHO_APPROX
1777 * were set, since the error can otherwise get quite big at edges.
1778 * (With no_rho_approx the max error at edges is sqrt(2), the same as
1779 * without no_rho_approx for 2d textures; otherwise it would be a factor of 2.)
1780 */
1781 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
1782 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
1783
1784 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
1785 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
1786
1787 tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
1788 tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
1789 tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
1790
1791 rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
1792 rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);
1793
1794 tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
1795 tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
1796 *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
1797 }
1798
1799 if (!need_derivs) {
1800 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1801 }
1802 mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
1803 signmabit = LLVMBuildAnd(builder, mai, signmask, "");
1804
1805 si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
1806 ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
1807 ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
1808
1809 /*
1810 * compute all possible new s/t coords, which does the mirroring
1811 * snewx = signma * -r;
1812 * tnewx = -t;
1813 * snewy = s;
1814 * tnewy = signma * r;
1815 * snewz = signma * s;
1816 * tnewz = -t;
1817 */
1818 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1819 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1820
1821 snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
1822 tnewx = tnegi;
1823
1824 snewy = si;
1825 tnewy = LLVMBuildXor(builder, signmabit, ri, "");
1826
1827 snewz = LLVMBuildXor(builder, signmabit, si, "");
1828 tnewz = tnegi;
1829
1830 /* select the mirrored values */
1831 face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
1832 face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
1833 face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
1834
1835 face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
1836 face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
1837
1838 /* add +1 for neg face */
1839 /* XXX with AVX probably want to use another select here -
1840 * as long as we ensure vblendvps gets used we can actually
1841 * skip the comparison and just use sign as a "mask" directly.
1842 */
1843 signma = LLVMBuildLShr(builder, mai, signshift, "");
1844 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1845
1846 /* project coords */
1847 if (!need_derivs) {
1848 imahalfpos = lp_build_cube_imapos(coord_bld, ma);
1849 face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
1850 face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
1851 }
1852
1853 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
1854 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
1855 }
1856
1857 else {
1858 struct lp_build_if_state if_ctx;
1859 LLVMValueRef face_s_var;
1860 LLVMValueRef face_t_var;
1861 LLVMValueRef face_var;
1862 LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
1863 LLVMValueRef shuffles[4];
1864 LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
1865 LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
1866 LLVMValueRef tmp[4], rxyz, arxyz;
1867 struct lp_build_context *float_bld = &bld->float_bld;
1868 LLVMValueRef s, t, r, face, face_s, face_t;
1869
1870 assert(bld->coord_bld.type.length == 4);
1871
1872 tmp[0] = s = coords[0];
1873 tmp[1] = t = coords[1];
1874 tmp[2] = r = coords[2];
1875 rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
1876 arxyz = lp_build_abs(&bld->coord_bld, rxyz);
1877
1878 shuffles[0] = lp_build_const_int32(gallivm, 0);
1879 shuffles[1] = lp_build_const_int32(gallivm, 1);
1880 shuffles[2] = lp_build_const_int32(gallivm, 0);
1881 shuffles[3] = lp_build_const_int32(gallivm, 1);
1882 arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1883 shuffles[0] = lp_build_const_int32(gallivm, 1);
1884 shuffles[1] = lp_build_const_int32(gallivm, 0);
1885 shuffles[2] = lp_build_const_int32(gallivm, 2);
1886 shuffles[3] = lp_build_const_int32(gallivm, 2);
1887 aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1888 arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
1889
1890 shuffles[0] = lp_build_const_int32(gallivm, 0);
1891 shuffles[1] = lp_build_const_int32(gallivm, 1);
1892 arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1893 LLVMConstVector(shuffles, 2), "");
1894 shuffles[0] = lp_build_const_int32(gallivm, 2);
1895 shuffles[1] = lp_build_const_int32(gallivm, 3);
1896 arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1897 LLVMConstVector(shuffles, 2), "");
1898 arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
1899
1900 arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1901 lp_build_const_int32(gallivm, 0), "");
1902 arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
1903 lp_build_const_int32(gallivm, 0), "");
1904 ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1905 lp_build_const_int32(gallivm, 1), "");
1906 ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
1907 lp_build_const_int32(gallivm, 0), "");
1908 face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
1909 face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
1910 face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
1911
1912 lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
1913 {
1914 /* +/- X face */
1915 LLVMValueRef sign, ima;
1916 si = LLVMBuildExtractElement(builder, rxyz,
1917 lp_build_const_int32(gallivm, 0), "");
1918
1919 sign = lp_build_sgn(float_bld, si);
1920 ima = lp_build_cube_imaneg(coord_bld, s);
1921 face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
1922 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1923 face = lp_build_cube_face(bld, si,
1924 PIPE_TEX_FACE_POS_X,
1925 PIPE_TEX_FACE_NEG_X);
1926 LLVMBuildStore(builder, face_s, face_s_var);
1927 LLVMBuildStore(builder, face_t, face_t_var);
1928 LLVMBuildStore(builder, face, face_var);
1929 }
1930 lp_build_else(&if_ctx);
1931 {
1932 struct lp_build_if_state if_ctx2;
1933
1934 lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
1935 {
1936 LLVMValueRef sign, ima;
1937 /* +/- Y face */
1938 ti = LLVMBuildExtractElement(builder, rxyz,
1939 lp_build_const_int32(gallivm, 1), "");
1940 sign = lp_build_sgn(float_bld, ti);
1941 ima = lp_build_cube_imaneg(coord_bld, t);
1942 face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
1943 face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
1944 face = lp_build_cube_face(bld, ti,
1945 PIPE_TEX_FACE_POS_Y,
1946 PIPE_TEX_FACE_NEG_Y);
1947 LLVMBuildStore(builder, face_s, face_s_var);
1948 LLVMBuildStore(builder, face_t, face_t_var);
1949 LLVMBuildStore(builder, face, face_var);
1950 }
1951 lp_build_else(&if_ctx2);
1952 {
1953 /* +/- Z face */
1954 LLVMValueRef sign, ima;
1955 ri = LLVMBuildExtractElement(builder, rxyz,
1956 lp_build_const_int32(gallivm, 2), "");
1957 sign = lp_build_sgn(float_bld, ri);
1958 ima = lp_build_cube_imaneg(coord_bld, r);
1959 face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
1960 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1961 face = lp_build_cube_face(bld, ri,
1962 PIPE_TEX_FACE_POS_Z,
1963 PIPE_TEX_FACE_NEG_Z);
1964 LLVMBuildStore(builder, face_s, face_s_var);
1965 LLVMBuildStore(builder, face_t, face_t_var);
1966 LLVMBuildStore(builder, face, face_var);
1967 }
1968 lp_build_endif(&if_ctx2);
1969 }
1970
1971 lp_build_endif(&if_ctx);
1972
1973 coords[0] = LLVMBuildLoad(builder, face_s_var, "face_s");
1974 coords[1] = LLVMBuildLoad(builder, face_t_var, "face_t");
1975 face = LLVMBuildLoad(builder, face_var, "face");
1976 coords[2] = lp_build_broadcast_scalar(&bld->int_coord_bld, face);
1977 }
1978 }
1979
1980
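/*
 * Editor's sketch (not part of gallivm): a scalar reference of the math
 * the vectorized explicit-derivative cube path above implements, assuming
 * only <math.h> (available via u_math.h). The ref_cube_lookup name and its
 * signature are hypothetical. Given s/t/r and explicit per-pixel
 * derivatives, it selects the major axis (z over y, y over x on ties),
 * mirrors the minor axes, applies the quotient rule for the derivatives
 * and projects to [0,1] face coords.
 */
static void
ref_cube_lookup(float s, float t, float r,
                const float dx[3], const float dy[3],
                int *face, float st[2], float ddx[2], float ddy[2])
{
   float as = fabsf(s), at = fabsf(t), ar = fabsf(r);
   float ma, sc, tc, scdx, tcdx, scdy, tcdy, madx, mady;
   float ima, imahalf, imahalfpos;

   if (ar >= as && ar >= at) {
      /* +/- Z major axis (wins all ties) */
      ma = r;  *face = 4;                      /* PIPE_TEX_FACE_POS_Z */
      sc = (r >= 0.0f) ? s : -s;               /* snewz = signma * s */
      tc = -t;                                 /* tnewz = -t */
      scdx = (r >= 0.0f) ? dx[0] : -dx[0];
      tcdx = -dx[1];
      scdy = (r >= 0.0f) ? dy[0] : -dy[0];
      tcdy = -dy[1];
      madx = dx[2];  mady = dy[2];
   }
   else if (at >= as) {
      /* +/- Y major axis (wins the tie against x) */
      ma = t;  *face = 2;                      /* PIPE_TEX_FACE_POS_Y */
      sc = s;                                  /* snewy = s */
      tc = (t >= 0.0f) ? r : -r;               /* tnewy = signma * r */
      scdx = dx[0];
      tcdx = (t >= 0.0f) ? dx[2] : -dx[2];
      scdy = dy[0];
      tcdy = (t >= 0.0f) ? dy[2] : -dy[2];
      madx = dx[1];  mady = dy[1];
   }
   else {
      /* +/- X major axis */
      ma = s;  *face = 0;                      /* PIPE_TEX_FACE_POS_X */
      sc = (s >= 0.0f) ? -r : r;               /* snewx = signma * -r */
      tc = -t;                                 /* tnewx = -t */
      scdx = (s >= 0.0f) ? -dx[2] : dx[2];
      tcdx = -dx[1];
      scdy = (s >= 0.0f) ? -dy[2] : dy[2];
      tcdy = -dy[1];
      madx = dx[0];  mady = dy[0];
   }
   if (ma < 0.0f)
      *face += 1;                              /* POS_* | 1 == NEG_* */

   ima = 1.0f / ma;                            /* signed, for the derivs */
   imahalf = 0.5f * ima;
   imahalfpos = fabsf(imahalf);                /* positive, for projection;
                                                * sign(ma) is already folded
                                                * into sc/tc */

   /* dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma, and friends */
   ddx[0] = (scdx - sc * madx * ima) * imahalf;
   ddx[1] = (tcdx - tc * madx * ima) * imahalf;
   ddy[0] = (scdy - sc * mady * ima) * imahalf;
   ddy[1] = (tcdy - tc * mady * ima) * imahalf;

   /* project into [0,1]: s = 1/2 * (sc / ma + 1) */
   st[0] = sc * imahalfpos + 0.5f;
   st[1] = tc * imahalfpos + 0.5f;
}
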
1981 /**
1982 * Compute the partial offset of a pixel block along an arbitrary axis.
1983 *
1984 * @param coord coordinate in pixels
1985 * @param stride number of bytes between successive pixel blocks along the axis
1986 * @param block_length number of pixels in a pixel block along the coordinate
1987 * axis
1988 * @param out_offset resulting relative offset of the pixel block in bytes
1989 * @param out_subcoord resulting sub-block pixel coordinate
1990 */
1991 void
1992 lp_build_sample_partial_offset(struct lp_build_context *bld,
1993 unsigned block_length,
1994 LLVMValueRef coord,
1995 LLVMValueRef stride,
1996 LLVMValueRef *out_offset,
1997 LLVMValueRef *out_subcoord)
1998 {
1999 LLVMBuilderRef builder = bld->gallivm->builder;
2000 LLVMValueRef offset;
2001 LLVMValueRef subcoord;
2002
2003 if (block_length == 1) {
2004 subcoord = bld->zero;
2005 }
2006 else {
2007 /*
2008 * Pixel blocks have power of two dimensions. LLVM should convert the
2009 * rem/div to bit arithmetic, and it does indeed do so, BUT it
2010 * transforms the values to scalar (and back) in the process, using
2011 * roughly extract, shift/and, mov, unpack (llvm 2.7).
2012 * The generated code looks seriously unfunny and is quite
2013 * expensive.
2014 */
2015 #if 0
2016 LLVMValueRef block_width = lp_build_const_int_vec(bld->gallivm, bld->type, block_length);
2017 subcoord = LLVMBuildURem(builder, coord, block_width, "");
2018 coord = LLVMBuildUDiv(builder, coord, block_width, "");
2019 #else
2020 unsigned logbase2 = util_logbase2(block_length);
2021 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
2022 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
2023 subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
2024 coord = LLVMBuildLShr(builder, coord, block_shift, "");
2025 #endif
2026 }
2027
2028 offset = lp_build_mul(bld, coord, stride);
2029
2030 assert(out_offset);
2031 assert(out_subcoord);
2032
2033 *out_offset = offset;
2034 *out_subcoord = subcoord;
2035 }
2036
2037
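/*
 * Editor's sketch (not part of gallivm): scalar equivalent of the
 * shift/mask decomposition above; ref_partial_offset is a hypothetical
 * name and util_logbase2 comes from u_math.h. E.g. for 4x4 DXT-style
 * blocks, coord 13 with block_length 4 gives block index 3 (shift by 2)
 * and subcoord 1 (mask with 3).
 */
static void
ref_partial_offset(unsigned block_length,   /* a power of two */
                   unsigned coord,
                   unsigned stride,         /* bytes between blocks */
                   unsigned *out_offset,
                   unsigned *out_subcoord)
{
   if (block_length == 1) {
      *out_subcoord = 0;
   }
   else {
      /* block_length - 1 is an all-ones low-bit mask for powers of two */
      *out_subcoord = coord & (block_length - 1);
      coord >>= util_logbase2(block_length);
   }
   *out_offset = coord * stride;
}
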
2038 /**
2039 * Compute the offset of a pixel block.
2040 *
2041 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
2042 *
2043 * Returns the relative offset and i,j sub-block coordinates
2044 */
2045 void
2046 lp_build_sample_offset(struct lp_build_context *bld,
2047 const struct util_format_description *format_desc,
2048 LLVMValueRef x,
2049 LLVMValueRef y,
2050 LLVMValueRef z,
2051 LLVMValueRef y_stride,
2052 LLVMValueRef z_stride,
2053 LLVMValueRef *out_offset,
2054 LLVMValueRef *out_i,
2055 LLVMValueRef *out_j)
2056 {
2057 LLVMValueRef x_stride;
2058 LLVMValueRef offset;
2059
2060 x_stride = lp_build_const_vec(bld->gallivm, bld->type,
2061 format_desc->block.bits/8);
2062
2063 lp_build_sample_partial_offset(bld,
2064 format_desc->block.width,
2065 x, x_stride,
2066 &offset, out_i);
2067
2068 if (y && y_stride) {
2069 LLVMValueRef y_offset;
2070 lp_build_sample_partial_offset(bld,
2071 format_desc->block.height,
2072 y, y_stride,
2073 &y_offset, out_j);
2074 offset = lp_build_add(bld, offset, y_offset);
2075 }
2076 else {
2077 *out_j = bld->zero;
2078 }
2079
2080 if (z && z_stride) {
2081 LLVMValueRef z_offset;
2082 LLVMValueRef k;
2083 lp_build_sample_partial_offset(bld,
2084 1, /* pixel blocks are always 2D */
2085 z, z_stride,
2086 &z_offset, &k);
2087 offset = lp_build_add(bld, offset, z_offset);
2088 }
2089
2090 *out_offset = offset;
2091 }
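
/*
 * Editor's sketch (not part of gallivm): how the per-axis partial
 * offsets combine, reusing the hypothetical ref_partial_offset above.
 * Only x and y are divided by the block dimensions; z always addresses
 * whole 2D slices since pixel blocks are two-dimensional.
 */
static unsigned
ref_sample_offset(unsigned block_width, unsigned block_height,
                  unsigned block_bytes,   /* format_desc->block.bits / 8 */
                  unsigned x, unsigned y, unsigned z,
                  unsigned y_stride, unsigned z_stride,
                  unsigned *out_i, unsigned *out_j)
{
   unsigned offset, y_offset, z_offset, k;

   /* x advances by whole blocks of block_bytes each */
   ref_partial_offset(block_width, x, block_bytes, &offset, out_i);
   ref_partial_offset(block_height, y, y_stride, &y_offset, out_j);
   ref_partial_offset(1, z, z_stride, &z_offset, &k);
   return offset + y_offset + z_offset;
}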