/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Texture sampling -- common code.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "util/u_format.h"
#include "util/u_math.h"
#include "lp_bld_arit.h"
#include "lp_bld_const.h"
#include "lp_bld_debug.h"
#include "lp_bld_printf.h"
#include "lp_bld_flow.h"
#include "lp_bld_sample.h"
#include "lp_bld_swizzle.h"
#include "lp_bld_type.h"
#include "lp_bld_logic.h"
#include "lp_bld_pack.h"
#include "lp_bld_quad.h"
#include "lp_bld_bitarit.h"


/*
 * Bri-linear factor. Should be greater than one.
 */
#define BRILINEAR_FACTOR 2

/**
 * Does the given texture wrap mode allow sampling the texture border color?
 * XXX maybe move this into gallium util code.
 */
boolean
lp_sampler_wrap_mode_uses_border_color(unsigned mode,
                                       unsigned min_img_filter,
                                       unsigned mag_img_filter)
{
   switch (mode) {
   case PIPE_TEX_WRAP_REPEAT:
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
   case PIPE_TEX_WRAP_MIRROR_REPEAT:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
      return FALSE;
   case PIPE_TEX_WRAP_CLAMP:
   case PIPE_TEX_WRAP_MIRROR_CLAMP:
      if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
          mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
         return FALSE;
      } else {
         return TRUE;
      }
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
      return TRUE;
   default:
      assert(0 && "unexpected wrap mode");
      return FALSE;
   }
}


/**
 * Initialize lp_sampler_static_texture_state object with the gallium
 * texture/sampler_view state (this contains the parts which are
 * considered static).
 */
void
lp_sampler_static_texture_state(struct lp_static_texture_state *state,
                                const struct pipe_sampler_view *view)
{
   const struct pipe_resource *texture;

   memset(state, 0, sizeof *state);

   if (!view || !view->texture)
      return;

   texture = view->texture;

   state->format = view->format;
   state->swizzle_r = view->swizzle_r;
   state->swizzle_g = view->swizzle_g;
   state->swizzle_b = view->swizzle_b;
   state->swizzle_a = view->swizzle_a;

   state->target = texture->target;
   state->pot_width = util_is_power_of_two(texture->width0);
   state->pot_height = util_is_power_of_two(texture->height0);
   state->pot_depth = util_is_power_of_two(texture->depth0);
   state->level_zero_only = !view->u.tex.last_level;

   /*
    * the layer / element / level parameters are all either dynamic
    * state or handled transparently wrt execution.
    */
}


/**
 * Initialize lp_sampler_static_sampler_state object with the gallium sampler
 * state (this contains the parts which are considered static).
 */
void
lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
                                const struct pipe_sampler_state *sampler)
{
   memset(state, 0, sizeof *state);

   if (!sampler)
      return;

   /*
    * We don't copy sampler state over unless it is actually enabled, to avoid
    * spurious recompiles, as the sampler static state is part of the shader
    * key.
    *
    * Ideally the state tracker or cso_cache module would make all state
    * canonical, but until that happens it's better to be safe than sorry here.
    *
    * XXX: Actually there's much more that can be done here, especially
    * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
    */

   state->wrap_s = sampler->wrap_s;
   state->wrap_t = sampler->wrap_t;
   state->wrap_r = sampler->wrap_r;
   state->min_img_filter = sampler->min_img_filter;
   state->mag_img_filter = sampler->mag_img_filter;
   state->seamless_cube_map = sampler->seamless_cube_map;

   if (sampler->max_lod > 0.0f) {
      state->min_mip_filter = sampler->min_mip_filter;
   } else {
      state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
   }

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
       state->min_img_filter != state->mag_img_filter) {
      if (sampler->lod_bias != 0.0f) {
         state->lod_bias_non_zero = 1;
      }

      /* If min_lod == max_lod we can greatly simplify mipmap selection.
       * This is a case that occurs during automatic mipmap generation.
       */
      if (sampler->min_lod == sampler->max_lod) {
         state->min_max_lod_equal = 1;
      } else {
         if (sampler->min_lod > 0.0f) {
            state->apply_min_lod = 1;
         }

         /*
          * XXX this won't do anything with the mesa state tracker, which
          * always sets max_lod to no more than the number of actually
          * present mipmap levels...
          */
         if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
            state->apply_max_lod = 1;
         }
      }
   }

   state->compare_mode = sampler->compare_mode;
   if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
      state->compare_func = sampler->compare_func;
   }

   state->normalized_coords = sampler->normalized_coords;
}


/**
 * Generate code to compute coordinate gradient (rho).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 *
 * The resulting rho has bld->lodf format (per quad or per element).
 */
static LLVMValueRef
lp_build_rho(struct lp_build_sample_context *bld,
             unsigned texture_unit,
             LLVMValueRef s,
             LLVMValueRef t,
             LLVMValueRef r,
             LLVMValueRef cube_rho,
             const struct lp_derivatives *derivs)
{
   struct gallivm_state *gallivm = bld->gallivm;
   struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
   struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
   struct lp_build_context *float_bld = &bld->float_bld;
   struct lp_build_context *coord_bld = &bld->coord_bld;
   struct lp_build_context *rho_bld = &bld->lodf_bld;
   const unsigned dims = bld->dims;
   LLVMValueRef ddx_ddy[2];
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
   LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
   LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
   LLVMValueRef rho_vec;
   LLVMValueRef int_size, float_size;
   LLVMValueRef rho;
   LLVMValueRef first_level, first_level_vec;
   unsigned length = coord_bld->type.length;
   unsigned num_quads = length / 4;
   boolean rho_per_quad = rho_bld->type.length != length;
   boolean no_rho_opt = (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1);
   unsigned i;
   LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
   LLVMValueRef rho_xvec, rho_yvec;

   /* Note that all simplified calculations will only work for isotropic filtering */

   /*
    * rho calcs are always per quad except when explicit derivs are
    * requested (excluding the messy cube maps for now).
    */

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
   int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
   float_size = lp_build_int_to_float(float_size_bld, int_size);

   if (cube_rho) {
      LLVMValueRef cubesize;
      LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);

      /*
       * The cube map code already did everything except the size mul and
       * per-quad extraction. Luckily cube maps are always square!
       */
      if (rho_per_quad) {
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, cube_rho, 0);
      }
      else {
         rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
      }
      /* Could optimize this for the single quad case by skipping the broadcast */
      cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                            rho_bld->type, float_size, index0);
      /* skipping sqrt hence returning rho squared */
      cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
      rho = lp_build_mul(rho_bld, cubesize, rho);
   }
   else if (derivs) {
      LLVMValueRef ddmax[3], ddx[3], ddy[3];
      for (i = 0; i < dims; i++) {
         LLVMValueRef floatdim;
         LLVMValueRef indexi = lp_build_const_int32(gallivm, i);

         floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                               coord_bld->type, float_size, indexi);

         /*
          * note that for the rho_per_quad case we could reduce the math (at
          * some shuffle cost), but for now use the same code as for the
          * per-pixel lod case.
          */
         if (no_rho_opt) {
            ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
            ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
            ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
         }
         else {
            LLVMValueRef tmpx, tmpy;
            tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
            tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
            ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
            ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
         }
      }
      if (no_rho_opt) {
         rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
         rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
         if (dims > 2) {
            rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
            rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
         }
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
         /* skipping sqrt hence returning rho squared */
      }
      else {
         rho = ddmax[0];
         if (dims > 1) {
            rho = lp_build_max(coord_bld, rho, ddmax[1]);
            if (dims > 2) {
               rho = lp_build_max(coord_bld, rho, ddmax[2]);
            }
         }
      }
      if (rho_per_quad) {
         /*
          * rho_vec contains per-pixel rho, convert to scalar per quad.
          */
         rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                         rho_bld->type, rho, 0);
      }
   }
   else {
      /*
       * This all looks a bit complex, but it's not that bad
       * (the shuffle code makes it look worse than it is).
       * Still, it might not be ideal for all cases.
       */
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle2[] = {
         2, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      if (dims < 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
      }
      else if (dims >= 2) {
         ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
         }
      }

      if (no_rho_opt) {
         static const unsigned char swizzle01[] = { /* no-op swizzle */
            0, 1,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         static const unsigned char swizzle23[] = {
            2, 3,
            LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
         };
         LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];

         for (i = 0; i < num_quads; i++) {
            shuffles[i*4+0] = shuffles[i*4+1] = index0;
            shuffles[i*4+2] = shuffles[i*4+3] = index1;
         }
         floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
                                           LLVMConstVector(shuffles, length), "");
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
         ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
         ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
         ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
         rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);

         if (dims > 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
                                                  coord_bld->type, float_size, index2);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
            ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
            ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
            rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
         }

         rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
         rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
         rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (rho_per_quad) {
            rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                            rho_bld->type, rho, 0);
         }
         else {
            rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
         }
         /* skipping sqrt hence returning rho squared */
      }
      else {
         ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
         if (dims > 2) {
            ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
         }

         if (dims < 2) {
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
         }
         else if (dims == 2) {
            static const unsigned char swizzle02[] = {
               0, 2,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            static const unsigned char swizzle13[] = {
               1, 3,
               LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
            };
            rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
            rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
         }
         else {
            LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
            LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
            assert(dims == 3);
            for (i = 0; i < num_quads; i++) {
               shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
               shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
               shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
               shuffles1[4*i + 3] = i32undef;
               shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
               shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
               shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
               shuffles2[4*i + 3] = i32undef;
            }
            rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles1, length), "");
            rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
                                              LLVMConstVector(shuffles2, length), "");
         }

         rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);

         if (bld->coord_type.length > 4) {
            /* expand size to each quad */
            if (dims > 1) {
               /* could use some broadcast_vector helper for this? */
               LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
               for (i = 0; i < num_quads; i++) {
                  src[i] = float_size;
               }
               float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
            }
            else {
               float_size = lp_build_broadcast_scalar(coord_bld, float_size);
            }
            rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
                  rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);

                  rho = lp_build_max(coord_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
                     rho = lp_build_max(coord_bld, rho, rho_r);
                  }
               }
            }
            if (rho_per_quad) {
               rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
                                               rho_bld->type, rho, 0);
            }
            else {
               rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
            }
         }
         else {
            if (dims <= 1) {
               rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
            }
            rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);

            if (dims <= 1) {
               rho = rho_vec;
            }
            else {
               if (dims >= 2) {
                  LLVMValueRef rho_s, rho_t, rho_r;

                  rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
                  rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");

                  rho = lp_build_max(float_bld, rho_s, rho_t);

                  if (dims >= 3) {
                     rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
                     rho = lp_build_max(float_bld, rho, rho_r);
                  }
               }
            }
            if (!rho_per_quad) {
               rho = lp_build_broadcast_scalar(rho_bld, rho);
            }
         }
      }
   }

   return rho;
}
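

/*
 * Illustrative scalar sketch (not compiled; assumes <math.h> and u_math.h's
 * MAX2): roughly what the code above computes per pixel for the common 2D
 * implicit-derivative path without GALLIVM_DEBUG_NO_RHO_APPROX. All names
 * here are illustrative only.
 */
#if 0
static float
rho_ref_2d(float dsdx, float dsdy, float dtdx, float dtdy,
           float width, float height)
{
   /* per-coord max of |d/dx| and |d/dy|, scaled by the level size */
   float rho_s = MAX2(fabsf(dsdx), fabsf(dsdy)) * width;
   float rho_t = MAX2(fabsf(dtdx), fabsf(dtdy)) * height;
   return MAX2(rho_s, rho_t);
}
#endif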


/*
 * Bri-linear lod computation
 *
 * Use a piece-wise linear approximation of log2 such that:
 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
 *   with the steepness specified in 'factor'
 * - exact result for 0.5, 1.5, etc.
 *
 *
 *   1.0 -              /----*
 *                     /
 *                    /
 *                   /
 *   0.5 -          *
 *                 /
 *                /
 *               /
 *   0.0 - *----/
 *
 *         |                 |
 *        2^0               2^1
 *
 * This is a technique also commonly used in hardware:
 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
 *
 * TODO: For correctness, this should only be applied when the texture is
 * known to have regular mipmaps, i.e., mipmaps derived from the base level.
 *
 * TODO: This could be done in fixed point, where applicable.
 */
static void
lp_build_brilinear_lod(struct lp_build_context *bld,
                       LLVMValueRef lod,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_fpart;
   double pre_offset = (factor - 0.5)/factor - 0.5;
   double post_offset = 1 - factor;

   if (0) {
      lp_build_printf(bld->gallivm, "lod = %f\n", lod);
   }

   lod = lp_build_add(bld, lod,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_offset));

   lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * It's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_fpart = lod_fpart;

   if (0) {
      lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
      lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
   }
}
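

/*
 * Illustrative scalar sketch (not compiled; names illustrative) of the
 * bri-linear computation above. With factor == BRILINEAR_FACTOR == 2,
 * pre_offset is 0.25 and post_offset is -1, so e.g. lod == 0.5 yields
 * ipart 0 / fpart 0.5 exactly, while lod == 0.1 yields a negative fpart
 * (and hence no mip interpolation).
 */
#if 0
static void
brilinear_lod_ref(float lod, float factor, int *ipart, float *fpart)
{
   float pre_offset = (factor - 0.5f)/factor - 0.5f;
   float post_offset = 1.0f - factor;
   lod += pre_offset;
   *ipart = (int)floorf(lod);
   *fpart = (lod - floorf(lod)) * factor + post_offset;
}
#endif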


/*
 * Combined log2 and brilinear lod computation.
 *
 * It's identical in all respects to calling lp_build_fast_log2() and
 * lp_build_brilinear_lod() above, but by combining the two we can compute
 * the integer and fractional parts independently.
 */
static void
lp_build_brilinear_rho(struct lp_build_context *bld,
                       LLVMValueRef rho,
                       double factor,
                       LLVMValueRef *out_lod_ipart,
                       LLVMValueRef *out_lod_fpart)
{
   LLVMValueRef lod_ipart;
   LLVMValueRef lod_fpart;

   const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
   const double post_offset = 1 - 2*factor;

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, rho));

   /*
    * The pre factor will make the intersections with the exact powers of two
    * happen precisely where we want them to be, which means that the integer
    * part will not need any post adjustments.
    */
   rho = lp_build_mul(bld, rho,
                      lp_build_const_vec(bld->gallivm, bld->type, pre_factor));

   /* ipart = ifloor(log2(rho)) */
   lod_ipart = lp_build_extract_exponent(bld, rho, 0);

   /* fpart = rho / 2**ipart */
   lod_fpart = lp_build_extract_mantissa(bld, rho);

   lod_fpart = lp_build_mul(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, factor));

   lod_fpart = lp_build_add(bld, lod_fpart,
                            lp_build_const_vec(bld->gallivm, bld->type, post_offset));

   /*
    * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
    * - the above expression will never produce numbers greater than one.
    * - the mip filtering branch is only taken if lod_fpart is positive
    */

   *out_lod_ipart = lod_ipart;
   *out_lod_fpart = lod_fpart;
}
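

/*
 * Illustrative scalar sketch (not compiled; names illustrative) of the
 * combined computation above, using a frexp-style decomposition in place
 * of lp_build_extract_exponent/mantissa.
 */
#if 0
static void
brilinear_rho_ref(float rho, float factor, int *ipart, float *fpart)
{
   float pre_factor = (2.0f*factor - 0.5f)/((float)M_SQRT2*factor);
   float post_offset = 1.0f - 2.0f*factor;
   int e;
   float m = frexpf(rho * pre_factor, &e);   /* rho*pre_factor == m * 2^e */
   *ipart = e - 1;                           /* == ifloor(log2(rho*pre_factor)) */
   *fpart = (2.0f*m) * factor + post_offset; /* 2*m is the [1,2) mantissa */
}
#endif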


/**
 * Fast implementation of iround(log2(sqrt(x))), based on
 * log2(x^n) == n*log2(x).
 *
 * Gives accurate results all the time.
 * (Could be trivially extended to handle other power-of-two roots.)
 */
static LLVMValueRef
lp_build_ilog2_sqrt(struct lp_build_context *bld,
                    LLVMValueRef x)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef ipart;
   struct lp_type i_type = lp_int_type(bld->type);
   LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);

   assert(bld->type.floating);

   assert(lp_check_value(bld->type, x));

   /* ipart = log2(x) + 0.5 = 0.5*(log2(x^2) + 1.0) */
   ipart = lp_build_extract_exponent(bld, x, 1);
   ipart = LLVMBuildAShr(builder, ipart, one, "");

   return ipart;
}
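

/*
 * Illustrative scalar sketch (not compiled): with x == rho^2, taking the
 * exponent of x plus one and shifting right arithmetically matches
 * iround(0.5 * log2(x)), e.g. x == 2^4.7 gives (4 + 1) >> 1 == 2.
 */
#if 0
static int
ilog2_sqrt_ref(float x)
{
   int e;
   frexpf(x, &e);   /* floor(log2(x)) == e - 1 */
   return e >> 1;   /* == ((e - 1) + 1) >> 1 */
}
#endif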


/**
 * Generate code to compute texture level of detail (lambda).
 * \param derivs  partial derivatives of (s, t, r, q) with respect to X and Y
 * \param lod_bias  optional float vector with the shader lod bias
 * \param explicit_lod  optional float vector with the explicit lod
 * \param cube_rho  rho calculated by cube coord mapping (optional)
 * \param out_lod_ipart  integer part of lod
 * \param out_lod_fpart  float part of lod (never larger than 1 but may be negative)
 * \param out_lod_positive  (mask) if lod is positive (i.e. texture is minified)
 *
 * The resulting lod can be scalar per quad or be per element.
 */
void
lp_build_lod_selector(struct lp_build_sample_context *bld,
                      unsigned texture_unit,
                      unsigned sampler_unit,
                      LLVMValueRef s,
                      LLVMValueRef t,
                      LLVMValueRef r,
                      LLVMValueRef cube_rho,
                      const struct lp_derivatives *derivs,
                      LLVMValueRef lod_bias, /* optional */
                      LLVMValueRef explicit_lod, /* optional */
                      unsigned mip_filter,
                      LLVMValueRef *out_lod_ipart,
                      LLVMValueRef *out_lod_fpart,
                      LLVMValueRef *out_lod_positive)

{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *lodf_bld = &bld->lodf_bld;
   LLVMValueRef lod;

   *out_lod_ipart = bld->lodi_bld.zero;
   *out_lod_positive = bld->lodi_bld.zero;
   *out_lod_fpart = lodf_bld->zero;

   /*
    * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification:
    * "Implementations may either unconditionally assume c = 0 for the minification
    * vs. magnification switch-over point, or may choose to make c depend on the
    * combination of minification and magnification modes as follows: if the
    * magnification filter is given by LINEAR and the minification filter is given
    * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
    * done to ensure that a minified texture does not appear "sharper" than a
    * magnified texture. Otherwise c = 0."
    * And 3.9.11 Texture Minification:
    * "If lod is less than or equal to the constant c (see section 3.9.12) the
    * texture is said to be magnified; if it is greater, the texture is minified."
    * So, using 0 as switchover point always, and using magnification for lod == 0.
    * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec),
    * old GL versions required 0.5 for the modes listed above.
    * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
    */

   if (bld->static_sampler_state->min_max_lod_equal) {
      /* User is forcing sampling from a particular mipmap level.
       * This is hit during mipmap generation.
       */
      LLVMValueRef min_lod =
         bld->dynamic_state->min_lod(bld->dynamic_state,
                                     bld->gallivm, sampler_unit);

      lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
   }
   else {
      if (explicit_lod) {
         if (bld->num_lods != bld->coord_type.length)
            lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                            lodf_bld->type, explicit_lod, 0);
         else
            lod = explicit_lod;
      }
      else {
         LLVMValueRef rho;
         boolean rho_squared = ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
                                (bld->dims > 1)) || cube_rho;

         rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);

         /*
          * Compute lod = log2(rho)
          */

         if (!lod_bias &&
             !bld->static_sampler_state->lod_bias_non_zero &&
             !bld->static_sampler_state->apply_max_lod &&
             !bld->static_sampler_state->apply_min_lod) {
            /*
             * Special case when there are no post-log2 adjustments, which
             * saves instructions by keeping the integer and fractional lod
             * computations separate from the start.
             */

            if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
                mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
               /*
                * Don't actually need both values all the time, lod_ipart is
                * needed for nearest mipfilter, lod_positive if min != mag.
                */
               if (rho_squared) {
                  *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
               }
               else {
                  *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
               }
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
            if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
                !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR) &&
                !rho_squared) {
               /*
                * This can't work if rho is squared. Not sure if it could be
                * fixed while keeping it worthwhile, could also do a sqrt
                * here, but brilinear and no_rho_opt seem like a combination
                * not making much sense anyway, so just use the ordinary path
                * below.
                */
               lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
                                      out_lod_ipart, out_lod_fpart);
               *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                                rho, lodf_bld->one);
               return;
            }
         }

         if (0) {
            lod = lp_build_log2(lodf_bld, rho);
         }
         else {
            lod = lp_build_fast_log2(lodf_bld, rho);
         }
         if (rho_squared) {
            /* log2(x) == 0.5*log2(x^2) */
            lod = lp_build_mul(lodf_bld, lod,
                               lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));
         }

         /* add shader lod bias */
         if (lod_bias) {
            if (bld->num_lods != bld->coord_type.length)
               lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
                                                    lodf_bld->type, lod_bias, 0);
            lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
         }
      }

      /* add sampler lod bias */
      if (bld->static_sampler_state->lod_bias_non_zero) {
         LLVMValueRef sampler_lod_bias =
            bld->dynamic_state->lod_bias(bld->dynamic_state,
                                         bld->gallivm, sampler_unit);
         sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
                                                      sampler_lod_bias);
         lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
      }

      /* clamp lod */
      if (bld->static_sampler_state->apply_max_lod) {
         LLVMValueRef max_lod =
            bld->dynamic_state->max_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);

         lod = lp_build_min(lodf_bld, lod, max_lod);
      }
      if (bld->static_sampler_state->apply_min_lod) {
         LLVMValueRef min_lod =
            bld->dynamic_state->min_lod(bld->dynamic_state,
                                        bld->gallivm, sampler_unit);
         min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);

         lod = lp_build_max(lodf_bld, lod, min_lod);
      }
   }

   *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
                                    lod, lodf_bld->zero);

   if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
      if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
         lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
                                out_lod_ipart, out_lod_fpart);
      }
      else {
         lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
      }

      lp_build_name(*out_lod_fpart, "lod_fpart");
   }
   else {
      *out_lod_ipart = lp_build_iround(lodf_bld, lod);
   }

   lp_build_name(*out_lod_ipart, "lod_ipart");

   return;
}
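

/*
 * Illustrative scalar sketch (not compiled; assumes u_math.h's CLAMP) of
 * the overall lod selection above for the implicit-derivative,
 * non-squared-rho case. Names are illustrative only.
 */
#if 0
static float
lod_ref(float rho, float shader_bias, float sampler_bias,
        float min_lod, float max_lod)
{
   float lod = log2f(rho);              /* lod = log2(rho) */
   lod += shader_bias + sampler_bias;   /* biases applied post-log2 */
   return CLAMP(lod, min_lod, max_lod); /* then clamped to [min,max] */
}
#endif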


/**
 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
 * to actual mip level.
 * Note: this is all scalar per quad code.
 * \param lod_ipart  int texture level of detail
 * \param level_out  returns the integer mipmap level
 * \param out_of_bounds  returns per coord out_of_bounds mask if provided
 */
void
lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *level_out,
                           LLVMValueRef *out_of_bounds)
{
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   LLVMValueRef first_level, last_level, level;

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   level = lp_build_add(leveli_bld, lod_ipart, first_level);

   if (out_of_bounds) {
      LLVMValueRef out, out1;
      out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
      out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
      out = lp_build_or(leveli_bld, out, out1);
      if (bld->num_mips == bld->coord_bld.type.length) {
         *out_of_bounds = out;
      }
      else if (bld->num_mips == 1) {
         *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
      }
      else {
         assert(bld->num_mips == bld->coord_bld.type.length / 4);
         *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
                                                                leveli_bld->type,
                                                                bld->int_coord_bld.type,
                                                                out);
      }
      *level_out = level;
   }
   else {
      /* clamp level to legal range of levels */
      *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
   }
}
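

/*
 * Illustrative scalar sketch (not compiled; assumes u_math.h's CLAMP) of
 * the clamping variant above (out_of_bounds == NULL):
 */
#if 0
static int
nearest_mip_level_ref(int lod_ipart, int first_level, int last_level)
{
   int level = lod_ipart + first_level;
   return CLAMP(level, first_level, last_level);
}
#endif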


/**
 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
 * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod
 * part accordingly.
 * Later, we'll sample from those two mipmap levels and interpolate between them.
 */
void
lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
                           unsigned texture_unit,
                           LLVMValueRef lod_ipart,
                           LLVMValueRef *lod_fpart_inout,
                           LLVMValueRef *level0_out,
                           LLVMValueRef *level1_out)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct lp_build_context *leveli_bld = &bld->leveli_bld;
   struct lp_build_context *levelf_bld = &bld->levelf_bld;
   LLVMValueRef first_level, last_level;
   LLVMValueRef clamp_min;
   LLVMValueRef clamp_max;

   assert(bld->num_lods == bld->num_mips);

   first_level = bld->dynamic_state->first_level(bld->dynamic_state,
                                                 bld->gallivm, texture_unit);
   last_level = bld->dynamic_state->last_level(bld->dynamic_state,
                                               bld->gallivm, texture_unit);
   first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
   last_level = lp_build_broadcast_scalar(leveli_bld, last_level);

   *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
   *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);

   /*
    * Clamp both *level0_out and *level1_out to [first_level, last_level], with
    * the minimum number of comparisons, and zeroing lod_fpart in the extreme
    * ends in the process.
    */

   /*
    * This code (vector select in particular) only works with llvm 3.1
    * (if there's more than one quad, with x86 backend). Might consider
    * converting to our lp_bld_logic helpers.
    */
#if HAVE_LLVM < 0x0301
   assert(leveli_bld->type.length == 1);
#endif

   /* *level0_out < first_level */
   clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
                             *level0_out, first_level,
                             "clamp_lod_to_first");

   *level0_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_min,
                                 first_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   /* *level0_out >= last_level */
   clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
                             *level0_out, last_level,
                             "clamp_lod_to_last");

   *level0_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level0_out, "");

   *level1_out = LLVMBuildSelect(builder, clamp_max,
                                 last_level, *level1_out, "");

   *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
                                      levelf_bld->zero, *lod_fpart_inout, "");

   lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
   lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
   lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
}
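

/*
 * Illustrative scalar sketch (not compiled; names illustrative) of the
 * level/weight fixup above: at either end of the mip chain both levels
 * collapse to the same level and the interpolation weight is zeroed, so
 * the out-of-range neighbor is never actually blended in.
 */
#if 0
static void
linear_mip_levels_ref(int lod_ipart, float lod_fpart,
                      int first_level, int last_level,
                      int *level0, int *level1, float *weight)
{
   *level0 = lod_ipart + first_level;
   *level1 = *level0 + 1;
   if (*level0 < first_level) {
      *level0 = *level1 = first_level;
      lod_fpart = 0.0f;
   }
   if (*level0 >= last_level) {
      *level0 = *level1 = last_level;
      lod_fpart = 0.0f;
   }
   *weight = lod_fpart;
}
#endif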


/**
 * Return pointer to a single mipmap level.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
                          LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], data_ptr, mip_offset;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   indexes[1] = level;
   mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
   mip_offset = LLVMBuildLoad(builder, mip_offset, "");
   data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
   return data_ptr;
}

/**
 * Return (per-pixel) offsets to mip levels.
 * \param level  integer mipmap level
 */
LLVMValueRef
lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
                         LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], offsets, offset1;

   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
      offset1 = LLVMBuildLoad(builder, offset1, "");
      offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      unsigned i;

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
      }
      offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
   }
   else {
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      offsets = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
         offset1 = LLVMBuildLoad(builder, offset1, "");
         offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
      }
   }
   return offsets;
}


/**
 * Codegen equivalent for u_minify().
 * Return max(1, base_size >> level);
 */
LLVMValueRef
lp_build_minify(struct lp_build_context *bld,
                LLVMValueRef base_size,
                LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   assert(lp_check_value(bld->type, base_size));
   assert(lp_check_value(bld->type, level));

   if (level == bld->zero) {
      /* if we're using mipmap level zero, no minification is needed */
      return base_size;
   }
   else {
      LLVMValueRef size =
         LLVMBuildLShr(builder, base_size, level, "minify");
      assert(bld->type.sign);
      size = lp_build_max(bld, size, bld->one);
      return size;
   }
}
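

/*
 * Illustrative scalar sketch (not compiled): the u_minify() behavior the
 * code above reproduces, e.g. a 13x7 texture at level 2 becomes 3x1.
 */
#if 0
static int
minify_ref(int base_size, int level)
{
   return MAX2(1, base_size >> level);
}
#endif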


/**
 * Dereference stride_array[mipmap_level] array to get a stride.
 * Return stride as a vector.
 */
static LLVMValueRef
lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
                              LLVMValueRef stride_array, LLVMValueRef level)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef indexes[2], stride, stride1;
   indexes[0] = lp_build_const_int32(bld->gallivm, 0);
   if (bld->num_mips == 1) {
      indexes[1] = level;
      stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
      stride1 = LLVMBuildLoad(builder, stride1, "");
      stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
   }
   else if (bld->num_mips == bld->coord_bld.type.length / 4) {
      LLVMValueRef stride1;
      unsigned i;

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->num_mips; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
      }
      stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
   }
   else {
      LLVMValueRef stride1;
      unsigned i;

      assert (bld->num_mips == bld->coord_bld.type.length);

      stride = bld->int_coord_bld.undef;
      for (i = 0; i < bld->coord_bld.type.length; i++) {
         LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
         indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
         stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
         stride1 = LLVMBuildLoad(builder, stride1, "");
         stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
      }
   }
   return stride;
}


/**
 * When sampling a mipmap, we need to compute the width, height, depth
 * of the source levels from the level indexes. This helper function
 * does that.
 */
void
lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
                            LLVMValueRef ilevel,
                            LLVMValueRef *out_size,
                            LLVMValueRef *row_stride_vec,
                            LLVMValueRef *img_stride_vec)
{
   const unsigned dims = bld->dims;
   LLVMValueRef ilevel_vec;

   /*
    * Compute width, height, depth at mipmap level 'ilevel'
    */
   if (bld->num_mips == 1) {
      ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
      *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
   }
   else {
      LLVMValueRef int_size_vec;
      LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
      unsigned num_quads = bld->coord_bld.type.length / 4;
      unsigned i;

      if (bld->num_mips == num_quads) {
         /*
          * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
          * intel "forgot" the variable shift count instruction until avx2.
          * A harmless 8x32 shift gets translated into 32 instructions
          * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
          * unable to recognize if there are really just 2 different shift
          * count values. So do the shift 4-wide before expansion.
          */
         struct lp_build_context bld4;
         struct lp_type type4;

         type4 = bld->int_coord_bld.type;
         type4.length = 4;

         lp_build_context_init(&bld4, bld->gallivm, type4);

         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld4,
                                                     bld->int_size);
         }
         else {
            assert(bld->int_size_in_bld.type.length == 4);
            int_size_vec = bld->int_size;
         }

         for (i = 0; i < num_quads; i++) {
            LLVMValueRef ileveli;
            LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);

            ileveli = lp_build_extract_broadcast(bld->gallivm,
                                                 bld->leveli_bld.type,
                                                 bld4.type,
                                                 ilevel,
                                                 indexi);
            tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
         }
         /*
          * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
          * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
          */
         *out_size = lp_build_concat(bld->gallivm,
                                     tmp,
                                     bld4.type,
                                     num_quads);
      }
      else {
         /* FIXME: this is terrible and results in a _huge_ vector
          * (for the dims > 1 case).
          * Should refactor this (together with extract_image_sizes) and do
          * something more useful. Could for instance, if we have width, height
          * with a 4-wide vector, pack all elements into an 8xi16 vector
          * (on which we can still do useful math) instead of using a 16xi32
          * vector.
          * FIXME: some callers can't handle this yet.
          * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
          * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
          */
         assert(bld->num_mips == bld->coord_bld.type.length);
         if (bld->dims == 1) {
            assert(bld->int_size_in_bld.type.length == 1);
            int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
                                                     bld->int_size);
            /* vector shift with variable shift count alert... */
            *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
         }
         else {
            LLVMValueRef ilevel1;
            for (i = 0; i < bld->num_mips; i++) {
               LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
               ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
                                                    bld->int_size_in_bld.type, ilevel, indexi);
               tmp[i] = bld->int_size;
               tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
            }
            *out_size = lp_build_concat(bld->gallivm, tmp,
                                        bld->int_size_in_bld.type,
                                        bld->num_mips);
         }
      }
   }

   if (dims >= 2) {
      *row_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->row_stride_array,
                                                      ilevel);
   }
   if (dims == 3 ||
       bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
       bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
       bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
      *img_stride_vec = lp_build_get_level_stride_vec(bld,
                                                      bld->img_stride_array,
                                                      ilevel);
   }
}


/**
 * Extract and broadcast texture size.
 *
 * @param size_bld  build context for the texture size vector (either
 *                  bld->int_size_bld or bld->float_size_bld)
 * @param coord_type  type of the coordinate vector (either
 *                    bld->int_coord_type or bld->coord_type)
 * @param size  vector with the texture size (width, height, depth)
 */
void
lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
                             struct lp_build_context *size_bld,
                             struct lp_type coord_type,
                             LLVMValueRef size,
                             LLVMValueRef *out_width,
                             LLVMValueRef *out_height,
                             LLVMValueRef *out_depth)
{
   const unsigned dims = bld->dims;
   LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
   struct lp_type size_type = size_bld->type;

   if (bld->num_mips == 1) {
      *out_width = lp_build_extract_broadcast(bld->gallivm,
                                              size_type,
                                              coord_type,
                                              size,
                                              LLVMConstInt(i32t, 0, 0));
      if (dims >= 2) {
         *out_height = lp_build_extract_broadcast(bld->gallivm,
                                                  size_type,
                                                  coord_type,
                                                  size,
                                                  LLVMConstInt(i32t, 1, 0));
         if (dims == 3) {
            *out_depth = lp_build_extract_broadcast(bld->gallivm,
                                                    size_type,
                                                    coord_type,
                                                    size,
                                                    LLVMConstInt(i32t, 2, 0));
         }
      }
   }
   else {
      unsigned num_quads = bld->coord_bld.type.length / 4;

      if (dims == 1) {
         *out_width = size;
      }
      else if (bld->num_mips == num_quads) {
         *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
         if (dims >= 2) {
            *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
            if (dims == 3) {
               *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
            }
         }
      }
      else {
         assert(bld->num_mips == bld->coord_type.length);
         *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                coord_type, size, 0);
         if (dims >= 2) {
            *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                    coord_type, size, 1);
            if (dims == 3) {
               *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
                                                      coord_type, size, 2);
            }
         }
      }
   }
}


/**
 * Unnormalize coords.
 *
 * @param flt_size  vector with the float texture size (width, height, depth)
 */
void
lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
                             LLVMValueRef flt_size,
                             LLVMValueRef *s,
                             LLVMValueRef *t,
                             LLVMValueRef *r)
{
   const unsigned dims = bld->dims;
   LLVMValueRef width;
   LLVMValueRef height;
   LLVMValueRef depth;

   lp_build_extract_image_sizes(bld,
                                &bld->float_size_bld,
                                bld->coord_type,
                                flt_size,
                                &width,
                                &height,
                                &depth);

   /* s = s * width, t = t * height */
   *s = lp_build_mul(&bld->coord_bld, *s, width);
   if (dims >= 2) {
      *t = lp_build_mul(&bld->coord_bld, *t, height);
      if (dims >= 3) {
         *r = lp_build_mul(&bld->coord_bld, *r, depth);
      }
   }
}
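

/*
 * Illustrative scalar sketch (not compiled): unnormalization maps [0,1]
 * texcoords into texel space, e.g. s == 0.25 on a 256-wide level becomes
 * 64.0.
 */
#if 0
static float
unnormalize_ref(float s, float width)
{
   return s * width;
}
#endif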


/** Helper used by lp_build_cube_lookup() */
static LLVMValueRef
lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
{
   /* ima = +0.5 / abs(coord); */
   LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
   LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
   return ima;
}


/** Helper for doing 3-wise selection.
 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
 */
static LLVMValueRef
lp_build_select3(struct lp_build_context *sel_bld,
                 LLVMValueRef sel0,
                 LLVMValueRef sel1,
                 LLVMValueRef val0,
                 LLVMValueRef val1,
                 LLVMValueRef val2)
{
   LLVMValueRef tmp;
   tmp = lp_build_select(sel_bld, sel0, val0, val1);
   return lp_build_select(sel_bld, sel1, val2, tmp);
}


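/*
 * Illustrative scalar sketch (not compiled; names illustrative) of the
 * face selection done in lp_build_cube_lookup() below for a single
 * (s, t, r) direction: pick the major axis (z over y, y over x on ties),
 * mirror the minor coords, then project, s = 1/2 * sc / |ma| + 1/2.
 */
#if 0
static void
cube_face_ref(float s, float t, float r,
              unsigned *face, float *face_s, float *face_t)
{
   float as = fabsf(s), at = fabsf(t), ar = fabsf(r);
   float ma, sc, tc;
   if (ar >= as && ar >= at) {
      ma = r; sc = (r >= 0.0f) ? s : -s; tc = -t;
      *face = (r >= 0.0f) ? PIPE_TEX_FACE_POS_Z : PIPE_TEX_FACE_NEG_Z;
   } else if (as > at) {
      ma = s; sc = (s >= 0.0f) ? -r : r; tc = -t;
      *face = (s >= 0.0f) ? PIPE_TEX_FACE_POS_X : PIPE_TEX_FACE_NEG_X;
   } else {
      ma = t; sc = s; tc = (t >= 0.0f) ? r : -r;
      *face = (t >= 0.0f) ? PIPE_TEX_FACE_POS_Y : PIPE_TEX_FACE_NEG_Y;
   }
   *face_s = 0.5f * sc / fabsf(ma) + 0.5f;
   *face_t = 0.5f * tc / fabsf(ma) + 0.5f;
}
#endif

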
/**
 * Generate code to do cube face selection and compute per-face texcoords.
 */
void
lp_build_cube_lookup(struct lp_build_sample_context *bld,
                     LLVMValueRef *coords,
                     const struct lp_derivatives *derivs_in, /* optional */
                     LLVMValueRef *rho,
                     struct lp_derivatives *derivs_out, /* optional */
                     boolean need_derivs)
{
   struct lp_build_context *coord_bld = &bld->coord_bld;
   LLVMBuilderRef builder = bld->gallivm->builder;
   struct gallivm_state *gallivm = bld->gallivm;
   LLVMValueRef si, ti, ri;

   /*
    * Do per-pixel face selection. We cannot however (as we used to do)
    * simply calculate the derivs afterwards (which is very bogus for
    * explicit derivs btw) because the values would be "random" when
    * not all pixels lie on the same face. So what we do here is just
    * calculate the derivatives after scaling the coords by the absolute
    * value of the inverse major axis, and essentially do rho calculation
    * steps as if it were a 3d texture. This is perfect if all pixels hit
    * the same face, but not so great at edges; I believe the max error
    * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially
    * measuring the 3d distance between 2 points on the cube instead of
    * measuring up/down the edge). Still this is possibly a win over just
    * selecting the same face for all pixels. Unfortunately, something
    * like that doesn't work for explicit derivatives.
    */
   struct lp_build_context *cint_bld = &bld->int_coord_bld;
   struct lp_type intctype = cint_bld->type;
   LLVMTypeRef coord_vec_type = coord_bld->vec_type;
   LLVMTypeRef cint_vec_type = cint_bld->vec_type;
   LLVMValueRef as, at, ar, face, face_s, face_t;
   LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
   LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
   LLVMValueRef tnegi, rnegi;
   LLVMValueRef ma, mai, signma, signmabit, imahalfpos;
   LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
   LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
                                                  1 << (intctype.width - 1));
   LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
                                                   intctype.width - 1);
   LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
   LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
   LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
   LLVMValueRef s = coords[0];
   LLVMValueRef t = coords[1];
   LLVMValueRef r = coords[2];

   assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
   assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
   assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);

   /*
    * get absolute value (for x/y/z face selection) and sign bit
    * (for mirroring minor coords and pos/neg face selection)
    * of the original coords.
    */
   as = lp_build_abs(&bld->coord_bld, s);
   at = lp_build_abs(&bld->coord_bld, t);
   ar = lp_build_abs(&bld->coord_bld, r);

   /*
    * major face determination: select x if x > y else select y;
    * select z if z >= max(x,y) else select the previous result.
    * If some axes are the same we choose z over y, y over x - the
    * dx10 spec seems to ask for it while OpenGL doesn't care (if we
    * didn't care we could save a select or two if using different
    * compares and doing at_g_as_ar last, since tnewx and tnewz are the
    * same).
    */
   as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
   maxasat = lp_build_max(coord_bld, as, at);
   ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);

   if (need_derivs && (derivs_in ||
       ((gallivm_debug & GALLIVM_DEBUG_NO_QUAD_LOD) &&
        (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX)))) {
      /*
       * XXX: This is really really complex.
       * It is a bit overkill to use this for implicit derivatives as well,
       * no way this is worth the cost in practice, but seems to be the
       * only way for getting accurate and per-pixel lod values.
       */
      LLVMValueRef ima, imahalf, tmp, ddx[3], ddy[3];
      LLVMValueRef madx, mady, madxdivma, madydivma;
      LLVMValueRef sdxi, tdxi, rdxi, sdyi, tdyi, rdyi;
      LLVMValueRef tdxnegi, rdxnegi, tdynegi, rdynegi;
      LLVMValueRef sdxnewx, sdxnewy, sdxnewz, tdxnewx, tdxnewy, tdxnewz;
      LLVMValueRef sdynewx, sdynewy, sdynewz, tdynewx, tdynewy, tdynewz;
      LLVMValueRef face_sdx, face_tdx, face_sdy, face_tdy;
      /*
       * s = 1/2 * ( sc / ma + 1)
       * t = 1/2 * ( tc / ma + 1)
       *
       * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
       * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
       *
       * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
       * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
       * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
       * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
       */
1541
1542 /* select ma, calculate ima */
1543 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1544 mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
1545 signmabit = LLVMBuildAnd(builder, mai, signmask, "");
1546 ima = lp_build_div(coord_bld, coord_bld->one, ma);
1547 imahalf = lp_build_mul(coord_bld, posHalf, ima);
1548 imahalfpos = lp_build_abs(coord_bld, imahalf);
1549
1550 if (!derivs_in) {
1551 ddx[0] = lp_build_ddx(coord_bld, s);
1552 ddx[1] = lp_build_ddx(coord_bld, t);
1553 ddx[2] = lp_build_ddx(coord_bld, r);
1554 ddy[0] = lp_build_ddy(coord_bld, s);
1555 ddy[1] = lp_build_ddy(coord_bld, t);
1556 ddy[2] = lp_build_ddy(coord_bld, r);
1557 }
1558 else {
1559 ddx[0] = derivs_in->ddx[0];
1560 ddx[1] = derivs_in->ddx[1];
1561 ddx[2] = derivs_in->ddx[2];
1562 ddy[0] = derivs_in->ddy[0];
1563 ddy[1] = derivs_in->ddy[1];
1564 ddy[2] = derivs_in->ddy[2];
1565 }
1566
1567 /* select major derivatives */
1568 madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddx[0], ddx[1], ddx[2]);
1569 mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddy[0], ddy[1], ddy[2]);
1570
1571 si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
1572 ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
1573 ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
1574
1575 sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
1576 tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
1577 rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");
1578
1579 sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
1580 tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
1581 rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");
1582
1583 /*
1584 * compute all possible new s/t coords, which does the mirroring,
1585 * and do the same for derivs minor axes.
1586 * snewx = signma * -r;
1587 * tnewx = -t;
1588 * snewy = s;
1589 * tnewy = signma * r;
1590 * snewz = signma * s;
1591 * tnewz = -t;
1592 */
      tnegi = LLVMBuildXor(builder, ti, signmask, "");
      rnegi = LLVMBuildXor(builder, ri, signmask, "");
      tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
      rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
      tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
      rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");

      snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
      tnewx = tnegi;
      sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
      tdxnewx = tdxnegi;
      sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
      tdynewx = tdynegi;

      snewy = si;
      tnewy = LLVMBuildXor(builder, signmabit, ri, "");
      sdxnewy = sdxi;
      tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
      sdynewy = sdyi;
      tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");

      snewz = LLVMBuildXor(builder, signmabit, si, "");
      tnewz = tnegi;
      sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
      tdxnewz = tdxnegi;
      sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
      tdynewz = tdynegi;

      /* select the mirrored values */
      face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
      face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
      face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
      face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdxnewx, sdxnewy, sdxnewz);
      face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdxnewx, tdxnewy, tdxnewz);
      face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdynewx, sdynewy, sdynewz);
      face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdynewx, tdynewy, tdynewz);

      face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
      face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
      face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
      face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
      face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
      face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");

      /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
      madxdivma = lp_build_mul(coord_bld, madx, ima);
      tmp = lp_build_mul(coord_bld, madxdivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdx, tmp);
      derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madxdivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdx, tmp);
      derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
      madydivma = lp_build_mul(coord_bld, mady, ima);
      tmp = lp_build_mul(coord_bld, madydivma, face_s);
      tmp = lp_build_sub(coord_bld, face_sdy, tmp);
      derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);

      /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
      tmp = lp_build_mul(coord_bld, madydivma, face_t);
      tmp = lp_build_sub(coord_bld, face_tdy, tmp);
      derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);
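      /*
       * (The formulas above are just the quotient rule applied to the
       * projected coord sc_face = 0.5 * sc / ma + 0.5:
       *    d/dx sc_face = 0.5 * (dx.sc * ma - sc * dx.ma) / (ma * ma)
       *                 = 0.5 * (dx.sc - sc * (dx.ma / ma)) / ma
       * so a single reciprocal (ima) is shared by all four derivatives,
       * with imahalf folding in the factor 0.5.)
       */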

      signma = LLVMBuildLShr(builder, mai, signshift, "");
      coords[2] = LLVMBuildOr(builder, face, signma, "face");

      /* project coords */
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);

      coords[0] = lp_build_add(coord_bld, face_s, posHalf);
      coords[1] = lp_build_add(coord_bld, face_t, posHalf);

      return;
   }

   else if (need_derivs) {
      LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
      static const unsigned char swizzle0[] = { /* no-op swizzle */
         0, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle1[] = {
         1, LP_BLD_SWIZZLE_DONTCARE,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle01[] = { /* no-op swizzle */
         0, 1,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle23[] = {
         2, 3,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };
      static const unsigned char swizzle02[] = {
         0, 2,
         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
      };

      /*
       * scale the s/t/r coords pre-select/mirror so we can calculate
       * "reasonable" derivs.
       */
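      /*
       * (imahalfpos is 0.5 / |ma| -- cf. lp_build_cube_imapos -- so after
       * the multiplies below all three coords lie in [-0.5, 0.5], and the
       * derivs computed from them are already in face-coordinate units;
       * only the size mul is still missing, as noted below.)
       */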
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      s = lp_build_mul(coord_bld, s, imahalfpos);
      t = lp_build_mul(coord_bld, t, imahalfpos);
      r = lp_build_mul(coord_bld, r, imahalfpos);

      /*
       * This isn't quite the same as the "ordinary" (3d deriv) path since we
       * know the texture is square, which simplifies things (the size mul,
       * which normally happens very early, can be omitted completely here
       * and done at the very end instead).
       * Also, always do the calculations as if GALLIVM_DEBUG_NO_RHO_APPROX
       * were set, since otherwise the error can get quite big at edges.
       * (With no_rho_approx the max error is sqrt(2) at edges, same as it is
       * without no_rho_approx for 2d textures; otherwise it would be a
       * factor of 2.)
       */
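      /*
       * In formulas, with q = x, y, what gets built below is
       *    rho_q^2 = (dq s)^2 + (dq t)^2 + (dq r)^2
       *    *rho    = max(rho_x^2, rho_y^2)
       * i.e. *rho holds the squared rho; no square root is taken here,
       * that is deferred to the lod computation.
       */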
      ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
      ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);

      ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
      ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
      tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
      tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);

      rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
      rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);

      tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
      tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
      *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
   }

   if (!need_derivs) {
      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
   }
   mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
   signmabit = LLVMBuildAnd(builder, mai, signmask, "");

   si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
   ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
   ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");

   /*
    * Compute all possible new s/t coords, which performs the mirroring:
    * snewx = signma * -r;
    * tnewx = -t;
    * snewy = s;
    * tnewy = signma * r;
    * snewz = signma * s;
    * tnewz = -t;
    */
   tnegi = LLVMBuildXor(builder, ti, signmask, "");
   rnegi = LLVMBuildXor(builder, ri, signmask, "");

   snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
   tnewx = tnegi;

   snewy = si;
   tnewy = LLVMBuildXor(builder, signmabit, ri, "");

   snewz = LLVMBuildXor(builder, signmabit, si, "");
   tnewz = tnegi;

   /* select the mirrored values */
   face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
   face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
   face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);

   face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
   face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");

   /* add +1 for neg face */
   /* XXX with AVX probably want to use another select here -
    * as long as we ensure vblendvps gets used we can actually
    * skip the comparison and just use sign as a "mask" directly.
    */
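   /* A hypothetical sketch of that idea (untested; it assumes the select
    * lowers to vblendvps, which only looks at the mask's sign bit, so mai
    * could serve as the blend mask directly):
    *
    *    facep1 = lp_build_add(cint_bld, face, cint_bld->one);
    *    coords[2] = lp_build_select(cint_bld, mai, facep1, face);
    */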
   signma = LLVMBuildLShr(builder, mai, signshift, "");
   coords[2] = LLVMBuildOr(builder, face, signma, "face");

   /* project coords */
   if (!need_derivs) {
      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
   }

   coords[0] = lp_build_add(coord_bld, face_s, posHalf);
   coords[1] = lp_build_add(coord_bld, face_t, posHalf);
}

/**
 * Compute the partial offset of a pixel block along an arbitrary axis.
 *
 * @param coord coordinate in pixels
 * @param stride number of bytes between rows of successive pixel blocks
 * @param block_length number of pixels in a pixel block along the coordinate
 *                     axis
 * @param out_offset resulting relative offset of the pixel block in bytes
 * @param out_subcoord resulting sub-block pixel coordinate
 */
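/*
 * A worked example (numbers are illustrative only): with block_length = 4,
 * coord = 13 and row stride S, the bit arithmetic below yields
 * *out_subcoord = 13 & 3 = 1 and *out_offset = (13 >> 2) * S = 3 * S.
 */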
void
lp_build_sample_partial_offset(struct lp_build_context *bld,
                               unsigned block_length,
                               LLVMValueRef coord,
                               LLVMValueRef stride,
                               LLVMValueRef *out_offset,
                               LLVMValueRef *out_subcoord)
{
   LLVMBuilderRef builder = bld->gallivm->builder;
   LLVMValueRef offset;
   LLVMValueRef subcoord;

   if (block_length == 1) {
      subcoord = bld->zero;
   }
   else {
      /*
       * Pixel blocks have power of two dimensions, so LLVM should convert
       * the rem/div to bit arithmetic.
       * TODO: Verify this.
       * It does indeed, BUT it transforms the operation to scalar (and
       * back) when doing so (using roughly extract, shift/and, mov,
       * unpack) (llvm 2.7). The generated code looks seriously unfunny
       * and is quite expensive.
       */
#if 0
      LLVMValueRef block_width = lp_build_const_int_vec(bld->gallivm, bld->type, block_length);
      subcoord = LLVMBuildURem(builder, coord, block_width, "");
      coord = LLVMBuildUDiv(builder, coord, block_width, "");
#else
      unsigned logbase2 = util_logbase2(block_length);
      LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
      LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
      subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
      coord = LLVMBuildLShr(builder, coord, block_shift, "");
#endif
   }

   offset = lp_build_mul(bld, coord, stride);

   assert(out_offset);
   assert(out_subcoord);

   *out_offset = offset;
   *out_subcoord = subcoord;
}


/**
 * Compute the offset of a pixel block.
 *
 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
 *
 * Returns the relative offset and i,j sub-block coordinates
 */
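/*
 * In other words, with bw/bh the format's block width/height and bs its
 * block size in bytes, the resulting offset is
 *    offset = (x / bw) * bs + (y / bh) * y_stride + z * z_stride
 * with i = x % bw and j = y % bh.
 */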
void
lp_build_sample_offset(struct lp_build_context *bld,
                       const struct util_format_description *format_desc,
                       LLVMValueRef x,
                       LLVMValueRef y,
                       LLVMValueRef z,
                       LLVMValueRef y_stride,
                       LLVMValueRef z_stride,
                       LLVMValueRef *out_offset,
                       LLVMValueRef *out_i,
                       LLVMValueRef *out_j)
{
   LLVMValueRef x_stride;
   LLVMValueRef offset;

   x_stride = lp_build_const_vec(bld->gallivm, bld->type,
                                 format_desc->block.bits/8);

   lp_build_sample_partial_offset(bld,
                                  format_desc->block.width,
                                  x, x_stride,
                                  &offset, out_i);

   if (y && y_stride) {
      LLVMValueRef y_offset;
      lp_build_sample_partial_offset(bld,
                                     format_desc->block.height,
                                     y, y_stride,
                                     &y_offset, out_j);
      offset = lp_build_add(bld, offset, y_offset);
   }
   else {
      *out_j = bld->zero;
   }

   if (z && z_stride) {
      LLVMValueRef z_offset;
      LLVMValueRef k;
      lp_build_sample_partial_offset(bld,
                                     1, /* pixel blocks are always 2D */
                                     z, z_stride,
                                     &z_offset, &k);
      offset = lp_build_add(bld, offset, z_offset);
   }

   *out_offset = offset;
}