gallivm: some bits of seamless cube filtering implementation
mesa.git: src/gallium/auxiliary/gallivm/lp_bld_sample.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- common code.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35 #include "pipe/p_defines.h"
36 #include "pipe/p_state.h"
37 #include "util/u_format.h"
38 #include "util/u_math.h"
39 #include "lp_bld_arit.h"
40 #include "lp_bld_const.h"
41 #include "lp_bld_debug.h"
42 #include "lp_bld_printf.h"
43 #include "lp_bld_flow.h"
44 #include "lp_bld_sample.h"
45 #include "lp_bld_swizzle.h"
46 #include "lp_bld_type.h"
47 #include "lp_bld_logic.h"
48 #include "lp_bld_pack.h"
49 #include "lp_bld_quad.h"
50 #include "lp_bld_bitarit.h"
51
52
53 /*
54 * Bri-linear factor. Should be greater than one.
55 */
56 #define BRILINEAR_FACTOR 2
57
58 /**
59 * Does the given texture wrap mode allow sampling the texture border color?
60 * XXX maybe move this into gallium util code.
61 */
62 boolean
63 lp_sampler_wrap_mode_uses_border_color(unsigned mode,
64 unsigned min_img_filter,
65 unsigned mag_img_filter)
66 {
67 switch (mode) {
68 case PIPE_TEX_WRAP_REPEAT:
69 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
70 case PIPE_TEX_WRAP_MIRROR_REPEAT:
71 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
72 return FALSE;
73 case PIPE_TEX_WRAP_CLAMP:
74 case PIPE_TEX_WRAP_MIRROR_CLAMP:
75 if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
76 mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
77 return FALSE;
78 } else {
79 return TRUE;
80 }
81 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
82 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
83 return TRUE;
84 default:
85 assert(0 && "unexpected wrap mode");
86 return FALSE;
87 }
88 }
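
/*
 * Usage sketch (hypothetical values, not called anywhere): CLAMP only
 * reaches the border color when filtering can straddle the texture edge,
 * so with NEAREST min/mag filters it behaves like CLAMP_TO_EDGE:
 *
 *    lp_sampler_wrap_mode_uses_border_color(PIPE_TEX_WRAP_CLAMP,
 *                                           PIPE_TEX_FILTER_NEAREST,
 *                                           PIPE_TEX_FILTER_NEAREST)
 *
 * returns FALSE, while the same wrap mode with PIPE_TEX_FILTER_LINEAR
 * for either filter returns TRUE.
 */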
89
90
91 /**
92 * Initialize the lp_static_texture_state object with the gallium
93 * texture/sampler_view state (this contains the parts which are
94 * considered static).
95 */
96 void
97 lp_sampler_static_texture_state(struct lp_static_texture_state *state,
98 const struct pipe_sampler_view *view)
99 {
100 const struct pipe_resource *texture;
101
102 memset(state, 0, sizeof *state);
103
104 if (!view || !view->texture)
105 return;
106
107 texture = view->texture;
108
109 state->format = view->format;
110 state->swizzle_r = view->swizzle_r;
111 state->swizzle_g = view->swizzle_g;
112 state->swizzle_b = view->swizzle_b;
113 state->swizzle_a = view->swizzle_a;
114
115 state->target = texture->target;
116 state->pot_width = util_is_power_of_two(texture->width0);
117 state->pot_height = util_is_power_of_two(texture->height0);
118 state->pot_depth = util_is_power_of_two(texture->depth0);
119 state->level_zero_only = !view->u.tex.last_level;
120
121 /*
122 * the layer / element / level parameters are all either dynamic
123 * state or handled transparently wrt execution.
124 */
125 }
126
127
128 /**
129 * Initialize the lp_static_sampler_state object with the gallium sampler
130 * state (this contains the parts which are considered static).
131 */
132 void
133 lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
134 const struct pipe_sampler_state *sampler)
135 {
136 memset(state, 0, sizeof *state);
137
138 if (!sampler)
139 return;
140
141 /*
142 * We don't copy sampler state over unless it is actually enabled, to avoid
143 * spurious recompiles, as the sampler static state is part of the shader
144 * key.
145 *
146 * Ideally the state tracker or cso_cache module would make all state
147 * canonical, but until that happens it's better to be safe than sorry here.
148 *
149 * XXX: Actually there's much more that can be done here, especially
150 * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
151 */
152
153 state->wrap_s = sampler->wrap_s;
154 state->wrap_t = sampler->wrap_t;
155 state->wrap_r = sampler->wrap_r;
156 state->min_img_filter = sampler->min_img_filter;
157 state->mag_img_filter = sampler->mag_img_filter;
158 state->seamless_cube_map = sampler->seamless_cube_map;
159
160 if (sampler->max_lod > 0.0f) {
161 state->min_mip_filter = sampler->min_mip_filter;
162 } else {
163 state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
164 }
165
166 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
167 state->min_img_filter != state->mag_img_filter) {
168 if (sampler->lod_bias != 0.0f) {
169 state->lod_bias_non_zero = 1;
170 }
171
172 /* If min_lod == max_lod we can greatly simplify mipmap selection.
173 * This is a case that occurs during automatic mipmap generation.
174 */
175 if (sampler->min_lod == sampler->max_lod) {
176 state->min_max_lod_equal = 1;
177 } else {
178 if (sampler->min_lod > 0.0f) {
179 state->apply_min_lod = 1;
180 }
181
182 /*
183 * XXX this won't do anything with the mesa state tracker, which always
184 * sets max_lod to no more than the number of mip levels actually present...
185 */
186 if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
187 state->apply_max_lod = 1;
188 }
189 }
190 }
191
192 state->compare_mode = sampler->compare_mode;
193 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
194 state->compare_func = sampler->compare_func;
195 }
196
197 state->normalized_coords = sampler->normalized_coords;
198 }
199
200
201 /**
202 * Generate code to compute coordinate gradient (rho).
203 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
204 *
205 * The resulting rho is in bld->lodf format (per quad or per element).
206 */
207 static LLVMValueRef
208 lp_build_rho(struct lp_build_sample_context *bld,
209 unsigned texture_unit,
210 LLVMValueRef s,
211 LLVMValueRef t,
212 LLVMValueRef r,
213 LLVMValueRef cube_rho,
214 const struct lp_derivatives *derivs)
215 {
216 struct gallivm_state *gallivm = bld->gallivm;
217 struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
218 struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
219 struct lp_build_context *float_bld = &bld->float_bld;
220 struct lp_build_context *coord_bld = &bld->coord_bld;
221 struct lp_build_context *rho_bld = &bld->lodf_bld;
222 const unsigned dims = bld->dims;
223 LLVMValueRef ddx_ddy[2];
224 LLVMBuilderRef builder = bld->gallivm->builder;
225 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
226 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
227 LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
228 LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
229 LLVMValueRef rho_vec;
230 LLVMValueRef int_size, float_size;
231 LLVMValueRef rho;
232 LLVMValueRef first_level, first_level_vec;
233 unsigned length = coord_bld->type.length;
234 unsigned num_quads = length / 4;
235 boolean rho_per_quad = rho_bld->type.length != length;
236 boolean no_rho_opt = (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1);
237 unsigned i;
238 LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
239 LLVMValueRef rho_xvec, rho_yvec;
240
241 /* Note that all simplified calculations will only work for isotropic filtering */
242
243 /*
244 * rho calcs are always per quad except when explicit derivs are given
245 * (excluding the messy cube maps for now), in which case they are per element.
246 */
247
248 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
249 bld->gallivm, texture_unit);
250 first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
251 int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
252 float_size = lp_build_int_to_float(float_size_bld, int_size);
253
254 if (cube_rho) {
255 LLVMValueRef cubesize;
256 LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);
257
258 /*
259 * The cube map code already did everything except the size mul and the
260 * per-quad extraction. Luckily cube maps are always square!
261 */
262 if (rho_per_quad) {
263 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
264 rho_bld->type, cube_rho, 0);
265 }
266 else {
267 rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
268 }
269 /* Could optimize this for the single quad case by just skipping the broadcast */
270 cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
271 rho_bld->type, float_size, index0);
272 if (no_rho_opt) {
273 /* skipping sqrt hence returning rho squared */
274 cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
275 }
276 rho = lp_build_mul(rho_bld, cubesize, rho);
277 }
278 else if (derivs && !(bld->static_texture_state->target == PIPE_TEXTURE_CUBE)) {
279 LLVMValueRef ddmax[3], ddx[3], ddy[3];
280 for (i = 0; i < dims; i++) {
281 LLVMValueRef floatdim;
282 LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
283
284 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
285 coord_bld->type, float_size, indexi);
286
287 /*
288 * note that for the rho_per_quad case we could reduce the math (at some
289 * shuffle cost), but for now use the same code as for the per-pixel lod case.
290 */
291 if (no_rho_opt) {
292 ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
293 ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
294 ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
295 ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
296 }
297 else {
298 LLVMValueRef tmpx, tmpy;
299 tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
300 tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
301 ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
302 ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
303 }
304 }
305 if (no_rho_opt) {
306 rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
307 rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
308 if (dims > 2) {
309 rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
310 rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
311 }
312 rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
313 /* skipping sqrt hence returning rho squared */
314 }
315 else {
316 rho = ddmax[0];
317 if (dims > 1) {
318 rho = lp_build_max(coord_bld, rho, ddmax[1]);
319 if (dims > 2) {
320 rho = lp_build_max(coord_bld, rho, ddmax[2]);
321 }
322 }
323 }
324 if (rho_per_quad) {
325 /*
326 * rho_vec contains per-pixel rho, convert to scalar per quad.
327 */
328 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
329 rho_bld->type, rho, 0);
330 }
331 }
332 else {
333 /*
334 * This all looks a bit complex, but it's not that bad
335 * (the shuffle code makes it look worse than it is).
336 * Still, it might not be ideal for all cases.
337 */
338 static const unsigned char swizzle0[] = { /* no-op swizzle */
339 0, LP_BLD_SWIZZLE_DONTCARE,
340 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
341 };
342 static const unsigned char swizzle1[] = {
343 1, LP_BLD_SWIZZLE_DONTCARE,
344 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
345 };
346 static const unsigned char swizzle2[] = {
347 2, LP_BLD_SWIZZLE_DONTCARE,
348 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
349 };
350
351 if (dims < 2) {
352 ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
353 }
354 else if (dims >= 2) {
355 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
356 if (dims > 2) {
357 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
358 }
359 }
360
361 if (no_rho_opt) {
362 static const unsigned char swizzle01[] = { /* no-op swizzle */
363 0, 1,
364 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
365 };
366 static const unsigned char swizzle23[] = {
367 2, 3,
368 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
369 };
370 LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];
371
372 for (i = 0; i < num_quads; i++) {
373 shuffles[i*4+0] = shuffles[i*4+1] = index0;
374 shuffles[i*4+2] = shuffles[i*4+3] = index1;
375 }
376 floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
377 LLVMConstVector(shuffles, length), "");
378 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
379 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
380 ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
381 ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
382 rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);
383
384 if (dims > 2) {
385 static const unsigned char swizzle02[] = {
386 0, 2,
387 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
388 };
389 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
390 coord_bld->type, float_size, index2);
391 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
392 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
393 ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
394 rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
395 }
396
397 rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
398 rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
399 rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
400
401 if (rho_per_quad) {
402 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
403 rho_bld->type, rho, 0);
404 }
405 else {
406 rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
407 }
408 /* skipping sqrt hence returning rho squared */
409 }
410 else {
411 ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
412 if (dims > 2) {
413 ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
414 }
415
416 if (dims < 2) {
417 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
418 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
419 }
420 else if (dims == 2) {
421 static const unsigned char swizzle02[] = {
422 0, 2,
423 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
424 };
425 static const unsigned char swizzle13[] = {
426 1, 3,
427 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
428 };
429 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
430 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
431 }
432 else {
433 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
434 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
435 assert(dims == 3);
436 for (i = 0; i < num_quads; i++) {
437 shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
438 shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
439 shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
440 shuffles1[4*i + 3] = i32undef;
441 shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
442 shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
443 shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
444 shuffles2[4*i + 3] = i32undef;
445 }
446 rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
447 LLVMConstVector(shuffles1, length), "");
448 rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
449 LLVMConstVector(shuffles2, length), "");
450 }
451
452 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
453
454 if (bld->coord_type.length > 4) {
455 /* expand size to each quad */
456 if (dims > 1) {
457 /* could use some broadcast_vector helper for this? */
458 LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
459 for (i = 0; i < num_quads; i++) {
460 src[i] = float_size;
461 }
462 float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
463 }
464 else {
465 float_size = lp_build_broadcast_scalar(coord_bld, float_size);
466 }
467 rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);
468
469 if (dims <= 1) {
470 rho = rho_vec;
471 }
472 else {
473 if (dims >= 2) {
474 LLVMValueRef rho_s, rho_t, rho_r;
475
476 rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
477 rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
478
479 rho = lp_build_max(coord_bld, rho_s, rho_t);
480
481 if (dims >= 3) {
482 rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
483 rho = lp_build_max(coord_bld, rho, rho_r);
484 }
485 }
486 }
487 if (rho_per_quad) {
488 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
489 rho_bld->type, rho, 0);
490 }
491 else {
492 rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
493 }
494 }
495 else {
496 if (dims <= 1) {
497 rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
498 }
499 rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);
500
501 if (dims <= 1) {
502 rho = rho_vec;
503 }
504 else {
505 if (dims >= 2) {
506 LLVMValueRef rho_s, rho_t, rho_r;
507
508 rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
509 rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");
510
511 rho = lp_build_max(float_bld, rho_s, rho_t);
512
513 if (dims >= 3) {
514 rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
515 rho = lp_build_max(float_bld, rho, rho_r);
516 }
517 }
518 }
519 if (!rho_per_quad) {
520 rho = lp_build_broadcast_scalar(rho_bld, rho);
521 }
522 }
523 }
524 }
525
526 return rho;
527 }
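
/*
 * Illustrative scalar sketch of what the vectorized code above computes in
 * the common isotropic path (not part of the code generation; names are
 * ours). For each dimension take max(|ddx|, |ddy|), scale to texel units,
 * and keep the largest result. The no_rho_opt path instead sums squared
 * derivs per direction and skips the sqrt, returning rho squared.
 * Assumes <math.h> (pulled in via util/u_math.h).
 */
static float
rho_scalar_reference(unsigned dims, const float size[3],
                     const float ddx[3], const float ddy[3])
{
   float rho = 0.0f;
   unsigned i;
   for (i = 0; i < dims; i++) {
      /* max of |du/dx| and |du/dy|, scaled by the level 0 dimension */
      float dd = MAX2(fabsf(ddx[i]), fabsf(ddy[i])) * size[i];
      rho = MAX2(rho, dd);
   }
   return rho;
}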
528
529
530 /*
531 * Bri-linear lod computation
532 *
533 * Use a piece-wise linear approximation of log2 such that:
534 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
535 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
536 * with the steepness specified in 'factor'
537 * - exact result for 0.5, 1.5, etc.
538 *
539 *
540 * 1.0 - /----*
541 * /
542 * /
543 * /
544 * 0.5 - *
545 * /
546 * /
547 * /
548 * 0.0 - *----/
549 *
550 * | |
551 * 2^0 2^1
552 *
553 * This is a technique also commonly used in hardware:
554 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
555 *
556 * TODO: For correctness, this should only be applied when texture is known to
557 * have regular mipmaps, i.e., mipmaps derived from the base level.
558 *
559 * TODO: This could be done in fixed point, where applicable.
560 */
561 static void
562 lp_build_brilinear_lod(struct lp_build_context *bld,
563 LLVMValueRef lod,
564 double factor,
565 LLVMValueRef *out_lod_ipart,
566 LLVMValueRef *out_lod_fpart)
567 {
568 LLVMValueRef lod_fpart;
569 double pre_offset = (factor - 0.5)/factor - 0.5;
570 double post_offset = 1 - factor;
571
572 if (0) {
573 lp_build_printf(bld->gallivm, "lod = %f\n", lod);
574 }
575
576 lod = lp_build_add(bld, lod,
577 lp_build_const_vec(bld->gallivm, bld->type, pre_offset));
578
579 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);
580
581 lod_fpart = lp_build_mul(bld, lod_fpart,
582 lp_build_const_vec(bld->gallivm, bld->type, factor));
583
584 lod_fpart = lp_build_add(bld, lod_fpart,
585 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
586
587 /*
588 * It's not necessary to clamp lod_fpart since:
589 * - the above expression will never produce numbers greater than one.
590 * - the mip filtering branch is only taken if lod_fpart is positive
591 */
592
593 *out_lod_fpart = lod_fpart;
594
595 if (0) {
596 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
597 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
598 }
599 }
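
/*
 * Scalar sketch of the remapping built above (assumes <math.h> floorf via
 * util/u_math.h; not used by the code generation). Handy for checking the
 * fixed points: with factor == 2, lod == 0.5 yields ipart == 0 and
 * fpart == 0.5 (exact), while lod values near an integer yield
 * fpart <= 0, so the mip interpolation branch is skipped there.
 */
static void
brilinear_lod_scalar(float lod, float factor,
                     int *out_ipart, float *out_fpart)
{
   const float pre_offset = (factor - 0.5f) / factor - 0.5f;
   const float post_offset = 1.0f - factor;
   float ipart;

   lod += pre_offset;
   ipart = floorf(lod);
   *out_ipart = (int)ipart;
   *out_fpart = (lod - ipart) * factor + post_offset;
}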
600
601
602 /*
603 * Combined log2 and brilinear lod computation.
604 *
605 * It is identical to calling lp_build_fast_log2() and
606 * lp_build_brilinear_lod() above, but by combining the two we can compute
607 * the integer and fractional parts independently.
608 */
609 static void
610 lp_build_brilinear_rho(struct lp_build_context *bld,
611 LLVMValueRef rho,
612 double factor,
613 LLVMValueRef *out_lod_ipart,
614 LLVMValueRef *out_lod_fpart)
615 {
616 LLVMValueRef lod_ipart;
617 LLVMValueRef lod_fpart;
618
619 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
620 const double post_offset = 1 - 2*factor;
621
622 assert(bld->type.floating);
623
624 assert(lp_check_value(bld->type, rho));
625
626 /*
627 * The pre factor will make the intersections with the exact powers of two
628 * happen precisely where we want them to be, which means that the integer
629 * part will not need any post adjustments.
630 */
631 rho = lp_build_mul(bld, rho,
632 lp_build_const_vec(bld->gallivm, bld->type, pre_factor));
633
634 /* ipart = ifloor(log2(rho)) */
635 lod_ipart = lp_build_extract_exponent(bld, rho, 0);
636
637 /* fpart = rho / 2**ipart */
638 lod_fpart = lp_build_extract_mantissa(bld, rho);
639
640 lod_fpart = lp_build_mul(bld, lod_fpart,
641 lp_build_const_vec(bld->gallivm, bld->type, factor));
642
643 lod_fpart = lp_build_add(bld, lod_fpart,
644 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
645
646 /*
647 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
648 * - the above expression will never produce numbers greater than one.
649 * - the mip filtering branch is only taken if lod_fpart is positive
650 */
651
652 *out_lod_ipart = lod_ipart;
653 *out_lod_fpart = lod_fpart;
654 }
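
/*
 * Scalar sketch of the combined computation above (assumes <math.h> frexpf
 * via util/u_math.h; not used by the code generation). frexpf returns the
 * mantissa m in [0.5, 1) with rho == m * 2^e, so e - 1 equals
 * floor(log2(rho)) and 2*m is the [1, 2) mantissa, matching what
 * lp_build_extract_exponent()/lp_build_extract_mantissa() produce above.
 */
static void
brilinear_rho_scalar(float rho, float factor,
                     int *out_ipart, float *out_fpart)
{
   const float pre_factor = (2.0f * factor - 0.5f) / (float)(M_SQRT2 * factor);
   const float post_offset = 1.0f - 2.0f * factor;
   int e;
   float m;

   rho *= pre_factor;
   m = frexpf(rho, &e);
   *out_ipart = e - 1;                               /* floor(log2(rho)) */
   *out_fpart = (2.0f * m) * factor + post_offset;   /* remapped mantissa */
}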
655
656
657 /**
658 * Fast implementation of iround(log2(sqrt(x))), based on
659 * log2(x^n) == n*log2(x).
660 *
661 * Gives accurate results all the time.
662 * (Could be trivially extended to handle other power-of-two roots.)
663 */
664 static LLVMValueRef
665 lp_build_ilog2_sqrt(struct lp_build_context *bld,
666 LLVMValueRef x)
667 {
668 LLVMBuilderRef builder = bld->gallivm->builder;
669 LLVMValueRef ipart;
670 struct lp_type i_type = lp_int_type(bld->type);
671 LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);
672
673 assert(bld->type.floating);
674
675 assert(lp_check_value(bld->type, x));
676
677 /* ipart = round(log2(sqrt(x))) = floor(0.5*(log2(x) + 1.0)) */
678 ipart = lp_build_extract_exponent(bld, x, 1);
679 ipart = LLVMBuildAShr(builder, ipart, one, "");
680
681 return ipart;
682 }
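
/*
 * Scalar sketch of the above (assumes <math.h> frexpf via util/u_math.h;
 * helper name is ours). frexpf's exponent equals floor(log2(x)) + 1,
 * i.e. exactly what lp_build_extract_exponent(bld, x, 1) yields, and the
 * arithmetic right shift halves it, rounding log2(sqrt(x)) to the nearest
 * integer.
 */
static int
ilog2_sqrt_scalar(float x)
{
   int e;
   frexpf(x, &e);    /* x == m * 2^e, m in [0.5, 1) */
   return e >> 1;    /* like the AShr above; assumes arithmetic shift in C */
}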
683
684
685 /**
686 * Generate code to compute texture level of detail (lambda).
687 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
688 * \param lod_bias optional float vector with the shader lod bias
689 * \param explicit_lod optional float vector with the explicit lod
690 * \param cube_rho rho calculated by cube coord mapping (optional)
691 * \param out_lod_ipart integer part of lod
692 * \param out_lod_fpart float part of lod (never larger than 1 but may be negative)
693 * \param out_lod_positive (mask) if lod is positive (i.e. texture is minified)
694 *
695 * The resulting lod can be scalar per quad or be per element.
696 */
697 void
698 lp_build_lod_selector(struct lp_build_sample_context *bld,
699 unsigned texture_unit,
700 unsigned sampler_unit,
701 LLVMValueRef s,
702 LLVMValueRef t,
703 LLVMValueRef r,
704 LLVMValueRef cube_rho,
705 const struct lp_derivatives *derivs,
706 LLVMValueRef lod_bias, /* optional */
707 LLVMValueRef explicit_lod, /* optional */
708 unsigned mip_filter,
709 LLVMValueRef *out_lod_ipart,
710 LLVMValueRef *out_lod_fpart,
711 LLVMValueRef *out_lod_positive)
712
713 {
714 LLVMBuilderRef builder = bld->gallivm->builder;
715 struct lp_build_context *lodf_bld = &bld->lodf_bld;
716 LLVMValueRef lod;
717
718 *out_lod_ipart = bld->lodi_bld.zero;
719 *out_lod_positive = bld->lodi_bld.zero;
720 *out_lod_fpart = lodf_bld->zero;
721
722 /*
723 * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification:
724 * "Implementations may either unconditionally assume c = 0 for the minification
725 * vs. magnification switch-over point, or may choose to make c depend on the
726 * combination of minification and magnification modes as follows: if the
727 * magnification filter is given by LINEAR and the minification filter is given
728 * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
729 * done to ensure that a minified texture does not appear "sharper" than a
730 * magnified texture. Otherwise c = 0."
731 * And 3.9.11 Texture Minification:
732 * "If lod is less than or equal to the constant c (see section 3.9.12) the
733 * texture is said to be magnified; if it is greater, the texture is minified."
734 * So, using 0 as switchover point always, and using magnification for lod == 0.
735 * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec),
736 * old GL versions required 0.5 for the modes listed above.
737 * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
738 */
739
740 if (bld->static_sampler_state->min_max_lod_equal) {
741 /* User is forcing sampling from a particular mipmap level.
742 * This is hit during mipmap generation.
743 */
744 LLVMValueRef min_lod =
745 bld->dynamic_state->min_lod(bld->dynamic_state,
746 bld->gallivm, sampler_unit);
747
748 lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
749 }
750 else {
751 if (explicit_lod) {
752 if (bld->num_lods != bld->coord_type.length)
753 lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
754 lodf_bld->type, explicit_lod, 0);
755 else
756 lod = explicit_lod;
757 }
758 else {
759 LLVMValueRef rho;
760 boolean rho_squared = (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
761 (bld->dims > 1);
762
763 rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);
764
765 /*
766 * Compute lod = log2(rho)
767 */
768
769 if (!lod_bias &&
770 !bld->static_sampler_state->lod_bias_non_zero &&
771 !bld->static_sampler_state->apply_max_lod &&
772 !bld->static_sampler_state->apply_min_lod) {
773 /*
774 * Special case when there are no post-log2 adjustments, which
775 * saves instructions by keeping the integer and fractional lod
776 * computations separate from the start.
777 */
778
779 if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
780 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
781 /*
782 * We don't actually need both values all the time: lod_ipart is
783 * needed for the nearest mipfilter, lod_positive only if min != mag.
784 */
785 if (rho_squared) {
786 *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
787 }
788 else {
789 *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
790 }
791 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
792 rho, lodf_bld->one);
793 return;
794 }
795 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
796 !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR) &&
797 !rho_squared) {
798 /*
799 * This can't work if rho is squared. Not sure if it could be
800 * fixed while keeping it worthwhile; we could also do a sqrt here,
801 * but brilinear and no_rho_opt seem like a combination that doesn't
802 * make much sense anyway, so just use the ordinary path below.
803 */
804 lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
805 out_lod_ipart, out_lod_fpart);
806 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
807 rho, lodf_bld->one);
808 return;
809 }
810 }
811
812 if (0) {
813 lod = lp_build_log2(lodf_bld, rho);
814 }
815 else {
816 lod = lp_build_fast_log2(lodf_bld, rho);
817 }
818 if (rho_squared) {
819 /* lod holds log2(rho^2) here; log2(rho) == 0.5*log2(rho^2) */
820 lod = lp_build_mul(lodf_bld, lod,
821 lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));
822 }
823
824 /* add shader lod bias */
825 if (lod_bias) {
826 if (bld->num_lods != bld->coord_type.length)
827 lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
828 lodf_bld->type, lod_bias, 0);
829 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
830 }
831 }
832
833 /* add sampler lod bias */
834 if (bld->static_sampler_state->lod_bias_non_zero) {
835 LLVMValueRef sampler_lod_bias =
836 bld->dynamic_state->lod_bias(bld->dynamic_state,
837 bld->gallivm, sampler_unit);
838 sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
839 sampler_lod_bias);
840 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
841 }
842
843 /* clamp lod */
844 if (bld->static_sampler_state->apply_max_lod) {
845 LLVMValueRef max_lod =
846 bld->dynamic_state->max_lod(bld->dynamic_state,
847 bld->gallivm, sampler_unit);
848 max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);
849
850 lod = lp_build_min(lodf_bld, lod, max_lod);
851 }
852 if (bld->static_sampler_state->apply_min_lod) {
853 LLVMValueRef min_lod =
854 bld->dynamic_state->min_lod(bld->dynamic_state,
855 bld->gallivm, sampler_unit);
856 min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
857
858 lod = lp_build_max(lodf_bld, lod, min_lod);
859 }
860 }
861
862 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
863 lod, lodf_bld->zero);
864
865 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
866 if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
867 lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
868 out_lod_ipart, out_lod_fpart);
869 }
870 else {
871 lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
872 }
873
874 lp_build_name(*out_lod_fpart, "lod_fpart");
875 }
876 else {
877 *out_lod_ipart = lp_build_iround(lodf_bld, lod);
878 }
879
880 lp_build_name(*out_lod_ipart, "lod_ipart");
881
882 return;
883 }
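
/*
 * Scalar outline of the general (non-special-cased) path above, for
 * reference only (assumes <math.h> log2f via util/u_math.h; the biases
 * and clamps correspond to the dynamic sampler state fetched above).
 */
static float
lod_selector_scalar(float rho, float shader_lod_bias, float sampler_lod_bias,
                    float min_lod, float max_lod)
{
   float lod = log2f(rho);        /* lambda */
   lod += shader_lod_bias;
   lod += sampler_lod_bias;
   lod = MIN2(lod, max_lod);
   lod = MAX2(lod, min_lod);
   return lod;                    /* lod > 0 means the texture is minified */
}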
884
885
886 /**
887 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
888 * to actual mip level.
889 * Note: this is all scalar per quad code.
890 * \param lod_ipart int texture level of detail
891 * \param level_out returns the integer mipmap level
892 * \param out_of_bounds returns per coord out_of_bounds mask if provided
893 */
894 void
895 lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
896 unsigned texture_unit,
897 LLVMValueRef lod_ipart,
898 LLVMValueRef *level_out,
899 LLVMValueRef *out_of_bounds)
900 {
901 struct lp_build_context *leveli_bld = &bld->leveli_bld;
902 LLVMValueRef first_level, last_level, level;
903
904 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
905 bld->gallivm, texture_unit);
906 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
907 bld->gallivm, texture_unit);
908 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
909 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
910
911 level = lp_build_add(leveli_bld, lod_ipart, first_level);
912
913 if (out_of_bounds) {
914 LLVMValueRef out, out1;
915 out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
916 out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
917 out = lp_build_or(leveli_bld, out, out1);
918 if (bld->num_mips == bld->coord_bld.type.length) {
919 *out_of_bounds = out;
920 }
921 else if (bld->num_mips == 1) {
922 *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
923 }
924 else {
925 assert(bld->num_mips == bld->coord_bld.type.length / 4);
926 *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
927 leveli_bld->type,
928 bld->int_coord_bld.type,
929 out);
930 }
931 *level_out = level;
932 }
933 else {
934 /* clamp level to legal range of levels */
935 *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
936
937 }
938 }
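
/*
 * Scalar equivalent of the clamping branch above (CLAMP comes from
 * util/u_math.h; helper name is ours, not used by the code generation).
 */
static int
nearest_mip_level_scalar(int lod_ipart, int first_level, int last_level)
{
   int level = lod_ipart + first_level;
   return CLAMP(level, first_level, last_level);
}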
939
940
941 /**
942 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
943 * to two (adjacent) mipmap level indexes, and fix up the float lod
944 * part accordingly.
945 * Later, we'll sample from those two mipmap levels and interpolate between them.
946 */
947 void
948 lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
949 unsigned texture_unit,
950 LLVMValueRef lod_ipart,
951 LLVMValueRef *lod_fpart_inout,
952 LLVMValueRef *level0_out,
953 LLVMValueRef *level1_out)
954 {
955 LLVMBuilderRef builder = bld->gallivm->builder;
956 struct lp_build_context *leveli_bld = &bld->leveli_bld;
957 struct lp_build_context *levelf_bld = &bld->levelf_bld;
958 LLVMValueRef first_level, last_level;
959 LLVMValueRef clamp_min;
960 LLVMValueRef clamp_max;
961
962 assert(bld->num_lods == bld->num_mips);
963
964 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
965 bld->gallivm, texture_unit);
966 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
967 bld->gallivm, texture_unit);
968 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
969 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
970
971 *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
972 *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);
973
974 /*
975 * Clamp both *level0_out and *level1_out to [first_level, last_level], with
976 * the minimum number of comparisons, zeroing lod_fpart at the extreme
977 * ends in the process.
978 */
979
980 /*
981 * This code (vector select in particular) only works with llvm 3.1
982 * (if there's more than one quad, with x86 backend). Might consider
983 * converting to our lp_bld_logic helpers.
984 */
985 #if HAVE_LLVM < 0x0301
986 assert(leveli_bld->type.length == 1);
987 #endif
988
989 /* *level0_out < first_level */
990 clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
991 *level0_out, first_level,
992 "clamp_lod_to_first");
993
994 *level0_out = LLVMBuildSelect(builder, clamp_min,
995 first_level, *level0_out, "");
996
997 *level1_out = LLVMBuildSelect(builder, clamp_min,
998 first_level, *level1_out, "");
999
1000 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
1001 levelf_bld->zero, *lod_fpart_inout, "");
1002
1003 /* *level0_out >= last_level */
1004 clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
1005 *level0_out, last_level,
1006 "clamp_lod_to_last");
1007
1008 *level0_out = LLVMBuildSelect(builder, clamp_max,
1009 last_level, *level0_out, "");
1010
1011 *level1_out = LLVMBuildSelect(builder, clamp_max,
1012 last_level, *level1_out, "");
1013
1014 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
1015 levelf_bld->zero, *lod_fpart_inout, "");
1016
1017 lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
1018 lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
1019 lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
1020 }
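
/*
 * Scalar sketch of the select cascade above (helper name is ours, not
 * used by the code generation). Note how both out-of-range cases collapse
 * level0 and level1 to the same level and zero the interpolation weight.
 */
static void
linear_mip_levels_scalar(int lod_ipart, int first_level, int last_level,
                         float *lod_fpart, int *level0, int *level1)
{
   *level0 = lod_ipart + first_level;
   *level1 = *level0 + 1;
   if (*level0 < first_level) {
      *level0 = *level1 = first_level;
      *lod_fpart = 0.0f;    /* no interpolation below the first level */
   }
   if (*level0 >= last_level) {
      *level0 = *level1 = last_level;
      *lod_fpart = 0.0f;    /* no interpolation past the last level */
   }
}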
1021
1022
1023 /**
1024 * Return pointer to a single mipmap level.
1025 * \param level integer mipmap level
1026 */
1027 LLVMValueRef
1028 lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
1029 LLVMValueRef level)
1030 {
1031 LLVMBuilderRef builder = bld->gallivm->builder;
1032 LLVMValueRef indexes[2], data_ptr, mip_offset;
1033
1034 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1035 indexes[1] = level;
1036 mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1037 mip_offset = LLVMBuildLoad(builder, mip_offset, "");
1038 data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
1039 return data_ptr;
1040 }
1041
1042 /**
1043 * Return (per-pixel) offsets to mip levels.
1044 * \param level integer mipmap level
1045 */
1046 LLVMValueRef
1047 lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
1048 LLVMValueRef level)
1049 {
1050 LLVMBuilderRef builder = bld->gallivm->builder;
1051 LLVMValueRef indexes[2], offsets, offset1;
1052
1053 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1054 if (bld->num_mips == 1) {
1055 indexes[1] = level;
1056 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1057 offset1 = LLVMBuildLoad(builder, offset1, "");
1058 offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
1059 }
1060 else if (bld->num_mips == bld->coord_bld.type.length / 4) {
1061 unsigned i;
1062
1063 offsets = bld->int_coord_bld.undef;
1064 for (i = 0; i < bld->num_mips; i++) {
1065 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1066 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
1067 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1068 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1069 offset1 = LLVMBuildLoad(builder, offset1, "");
1070 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
1071 }
1072 offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
1073 }
1074 else {
1075 unsigned i;
1076
1077 assert (bld->num_mips == bld->coord_bld.type.length);
1078
1079 offsets = bld->int_coord_bld.undef;
1080 for (i = 0; i < bld->num_mips; i++) {
1081 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1082 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1083 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1084 offset1 = LLVMBuildLoad(builder, offset1, "");
1085 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
1086 }
1087 }
1088 return offsets;
1089 }
1090
1091
1092 /**
1093 * Codegen equivalent for u_minify().
1094 * Return max(1, base_size >> level);
1095 */
1096 LLVMValueRef
1097 lp_build_minify(struct lp_build_context *bld,
1098 LLVMValueRef base_size,
1099 LLVMValueRef level)
1100 {
1101 LLVMBuilderRef builder = bld->gallivm->builder;
1102 assert(lp_check_value(bld->type, base_size));
1103 assert(lp_check_value(bld->type, level));
1104
1105 if (level == bld->zero) {
1106 /* if we're using mipmap level zero, no minification is needed */
1107 return base_size;
1108 }
1109 else {
1110 LLVMValueRef size =
1111 LLVMBuildLShr(builder, base_size, level, "minify");
1112 assert(bld->type.sign);
1113 size = lp_build_max(bld, size, bld->one);
1114 return size;
1115 }
1116 }
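
/*
 * For reference, the scalar gallium u_minify() helper this mirrors is
 * equivalent to the following (a sketch, not the actual definition):
 */
static unsigned
minify_scalar_reference(unsigned base_size, unsigned level)
{
   /* never go below one texel */
   return MAX2(base_size >> level, 1u);
}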
1117
1118
1119 /**
1120 * Dereference stride_array[mipmap_level] array to get a stride.
1121 * Return stride as a vector.
1122 */
1123 static LLVMValueRef
1124 lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
1125 LLVMValueRef stride_array, LLVMValueRef level)
1126 {
1127 LLVMBuilderRef builder = bld->gallivm->builder;
1128 LLVMValueRef indexes[2], stride, stride1;
1129 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1130 if (bld->num_mips == 1) {
1131 indexes[1] = level;
1132 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1133 stride1 = LLVMBuildLoad(builder, stride1, "");
1134 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
1135 }
1136 else if (bld->num_mips == bld->coord_bld.type.length / 4) {
1137 LLVMValueRef stride1;
1138 unsigned i;
1139
1140 stride = bld->int_coord_bld.undef;
1141 for (i = 0; i < bld->num_mips; i++) {
1142 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1143 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
1144 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1145 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1146 stride1 = LLVMBuildLoad(builder, stride1, "");
1147 stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
1148 }
1149 stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
1150 }
1151 else {
1152 LLVMValueRef stride1;
1153 unsigned i;
1154
1155 assert (bld->num_mips == bld->coord_bld.type.length);
1156
1157 stride = bld->int_coord_bld.undef;
1158 for (i = 0; i < bld->coord_bld.type.length; i++) {
1159 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1160 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1161 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1162 stride1 = LLVMBuildLoad(builder, stride1, "");
1163 stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
1164 }
1165 }
1166 return stride;
1167 }
1168
1169
1170 /**
1171 * When sampling a mipmap, we need to compute the width, height, depth
1172 * of the source levels from the level indexes. This helper function
1173 * does that.
1174 */
1175 void
1176 lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
1177 LLVMValueRef ilevel,
1178 LLVMValueRef *out_size,
1179 LLVMValueRef *row_stride_vec,
1180 LLVMValueRef *img_stride_vec)
1181 {
1182 const unsigned dims = bld->dims;
1183 LLVMValueRef ilevel_vec;
1184
1185 /*
1186 * Compute width, height, depth at mipmap level 'ilevel'
1187 */
1188 if (bld->num_mips == 1) {
1189 ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
1190 *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
1191 }
1192 else {
1193 LLVMValueRef int_size_vec;
1194 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
1195 unsigned num_quads = bld->coord_bld.type.length / 4;
1196 unsigned i;
1197
1198 if (bld->num_mips == num_quads) {
1199 /*
1200 * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
1201 * intel "forgot" the variable shift count instruction until avx2.
1202 * A harmless 8x32 shift gets translated into 32 instructions
1203 * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
1204 * unable to recognize if there are really just 2 different shift
1205 * count values. So do the shift 4-wide before expansion.
1206 */
1207 struct lp_build_context bld4;
1208 struct lp_type type4;
1209
1210 type4 = bld->int_coord_bld.type;
1211 type4.length = 4;
1212
1213 lp_build_context_init(&bld4, bld->gallivm, type4);
1214
1215 if (bld->dims == 1) {
1216 assert(bld->int_size_in_bld.type.length == 1);
1217 int_size_vec = lp_build_broadcast_scalar(&bld4,
1218 bld->int_size);
1219 }
1220 else {
1221 assert(bld->int_size_in_bld.type.length == 4);
1222 int_size_vec = bld->int_size;
1223 }
1224
1225 for (i = 0; i < num_quads; i++) {
1226 LLVMValueRef ileveli;
1227 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1228
1229 ileveli = lp_build_extract_broadcast(bld->gallivm,
1230 bld->leveli_bld.type,
1231 bld4.type,
1232 ilevel,
1233 indexi);
1234 tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
1235 }
1236 /*
1237 * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
1238 * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
1239 */
1240 *out_size = lp_build_concat(bld->gallivm,
1241 tmp,
1242 bld4.type,
1243 num_quads);
1244 }
1245 else {
1246 /* FIXME: this is terrible and results in a _huge_ vector
1247 * (for the dims > 1 case).
1248 * Should refactor this (together with extract_image_sizes) and do
1249 * something more useful. Could, for instance, if we have width and height
1250 * in a 4-wide vector, pack all elements into an 8xi16 vector
1251 * (on which we can still do useful math) instead of using a 16xi32
1252 * vector.
1253 * FIXME: some callers can't handle this yet.
1254 * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
1255 * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
1256 */
1257 assert(bld->num_mips == bld->coord_bld.type.length);
1258 if (bld->dims == 1) {
1259 assert(bld->int_size_in_bld.type.length == 1);
1260 int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
1261 bld->int_size);
1262 /* vector shift with variable shift count alert... */
1263 *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
1264 }
1265 else {
1266 LLVMValueRef ilevel1;
1267 for (i = 0; i < bld->num_mips; i++) {
1268 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1269 ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
1270 bld->int_size_in_bld.type, ilevel, indexi);
1271 tmp[i] = bld->int_size;
1272 tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
1273 }
1274 *out_size = lp_build_concat(bld->gallivm, tmp,
1275 bld->int_size_in_bld.type,
1276 bld->num_mips);
1277 }
1278 }
1279 }
1280
1281 if (dims >= 2) {
1282 *row_stride_vec = lp_build_get_level_stride_vec(bld,
1283 bld->row_stride_array,
1284 ilevel);
1285 }
1286 if (dims == 3 ||
1287 bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
1288 bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
1289 bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
1290 *img_stride_vec = lp_build_get_level_stride_vec(bld,
1291 bld->img_stride_array,
1292 ilevel);
1293 }
1294 }
1295
1296
1297 /**
1298 * Extract and broadcast texture size.
1299 *
1300 * @param size_bld build context matching the texture size vector (either
1301 * bld->int_size_bld or bld->float_size_bld)
1302 * @param coord_type type of the coord vector (either
1303 * bld->int_coord_type or bld->coord_type)
1304 * @param size vector with the texture size (width, height, depth)
1305 */
1306 void
1307 lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
1308 struct lp_build_context *size_bld,
1309 struct lp_type coord_type,
1310 LLVMValueRef size,
1311 LLVMValueRef *out_width,
1312 LLVMValueRef *out_height,
1313 LLVMValueRef *out_depth)
1314 {
1315 const unsigned dims = bld->dims;
1316 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1317 struct lp_type size_type = size_bld->type;
1318
1319 if (bld->num_mips == 1) {
1320 *out_width = lp_build_extract_broadcast(bld->gallivm,
1321 size_type,
1322 coord_type,
1323 size,
1324 LLVMConstInt(i32t, 0, 0));
1325 if (dims >= 2) {
1326 *out_height = lp_build_extract_broadcast(bld->gallivm,
1327 size_type,
1328 coord_type,
1329 size,
1330 LLVMConstInt(i32t, 1, 0));
1331 if (dims == 3) {
1332 *out_depth = lp_build_extract_broadcast(bld->gallivm,
1333 size_type,
1334 coord_type,
1335 size,
1336 LLVMConstInt(i32t, 2, 0));
1337 }
1338 }
1339 }
1340 else {
1341 unsigned num_quads = bld->coord_bld.type.length / 4;
1342
1343 if (dims == 1) {
1344 *out_width = size;
1345 }
1346 else if (bld->num_mips == num_quads) {
1347 *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
1348 if (dims >= 2) {
1349 *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
1350 if (dims == 3) {
1351 *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
1352 }
1353 }
1354 }
1355 else {
1356 assert(bld->num_mips == bld->coord_type.length);
1357 *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1358 coord_type, size, 0);
1359 if (dims >= 2) {
1360 *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1361 coord_type, size, 1);
1362 if (dims == 3) {
1363 *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1364 coord_type, size, 2);
1365 }
1366 }
1367 }
1368 }
1369 }
1370
1371
1372 /**
1373 * Unnormalize coords.
1374 *
1375 * @param flt_size vector with the texture size as floats (width, height, depth)
1376 */
1377 void
1378 lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
1379 LLVMValueRef flt_size,
1380 LLVMValueRef *s,
1381 LLVMValueRef *t,
1382 LLVMValueRef *r)
1383 {
1384 const unsigned dims = bld->dims;
1385 LLVMValueRef width;
1386 LLVMValueRef height;
1387 LLVMValueRef depth;
1388
1389 lp_build_extract_image_sizes(bld,
1390 &bld->float_size_bld,
1391 bld->coord_type,
1392 flt_size,
1393 &width,
1394 &height,
1395 &depth);
1396
1397 /* s = s * width, t = t * height */
1398 *s = lp_build_mul(&bld->coord_bld, *s, width);
1399 if (dims >= 2) {
1400 *t = lp_build_mul(&bld->coord_bld, *t, height);
1401 if (dims >= 3) {
1402 *r = lp_build_mul(&bld->coord_bld, *r, depth);
1403 }
1404 }
1405 }
1406
1407
1408 /** Helper used by lp_build_cube_lookup() */
1409 static LLVMValueRef
1410 lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
1411 {
1412 /* ima = +0.5 / abs(coord); */
1413 LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1414 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1415 LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
1416 return ima;
1417 }
1418
1419 /** Helper used by lp_build_cube_lookup() */
1420 static LLVMValueRef
1421 lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
1422 {
1423 /* ima = -0.5 / abs(coord); */
1424 LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
1425 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1426 LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
1427 return ima;
1428 }
1429
1430 /**
1431 * Helper used by lp_build_cube_lookup()
1432 * FIXME: the sign here can also be 0.
1433 * Arithmetically this could definitely make a difference. Either
1434 * fix the comment or use another (simpler) sign function; not sure
1435 * which one it should be.
1436 * \param sign scalar +1 or -1
1437 * \param coord float vector
1438 * \param ima float vector
1439 */
1440 static LLVMValueRef
1441 lp_build_cube_coord(struct lp_build_context *coord_bld,
1442 LLVMValueRef sign, int negate_coord,
1443 LLVMValueRef coord, LLVMValueRef ima)
1444 {
1445 /* return negate(coord) * ima * sign + 0.5; */
1446 LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1447 LLVMValueRef res;
1448
1449 assert(negate_coord == +1 || negate_coord == -1);
1450
1451 if (negate_coord == -1) {
1452 coord = lp_build_negate(coord_bld, coord);
1453 }
1454
1455 res = lp_build_mul(coord_bld, coord, ima);
1456 if (sign) {
1457 sign = lp_build_broadcast_scalar(coord_bld, sign);
1458 res = lp_build_mul(coord_bld, res, sign);
1459 }
1460 res = lp_build_add(coord_bld, res, half);
1461
1462 return res;
1463 }
1464
1465
1466 /** Helper used by lp_build_cube_lookup()
1467 * Return (major_coord >= 0) ? pos_face : neg_face;
1468 */
1469 static LLVMValueRef
1470 lp_build_cube_face(struct lp_build_sample_context *bld,
1471 LLVMValueRef major_coord,
1472 unsigned pos_face, unsigned neg_face)
1473 {
1474 struct gallivm_state *gallivm = bld->gallivm;
1475 LLVMBuilderRef builder = gallivm->builder;
1476 LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
1477 major_coord,
1478 bld->float_bld.zero, "");
1479 LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
1480 LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
1481 LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
1482 return res;
1483 }
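
/*
 * Illustrative scalar sketch of the per-pixel face selection and coord
 * projection done vectorized below (helper name is ours; it follows the
 * GL spec major-axis table, with ties resolved z over y, y over x like
 * the code below). Assumes <math.h> fabsf via util/u_math.h.
 */
static unsigned
cube_face_select_scalar(float rx, float ry, float rz,
                        float *out_s, float *out_t)
{
   const float ax = fabsf(rx), ay = fabsf(ry), az = fabsf(rz);
   float sc, tc, ma;
   unsigned face;

   if (az >= ax && az >= ay) {          /* major axis z */
      face = rz >= 0.0f ? PIPE_TEX_FACE_POS_Z : PIPE_TEX_FACE_NEG_Z;
      sc = rz >= 0.0f ? rx : -rx;
      tc = -ry;
      ma = az;
   }
   else if (ax > ay) {                  /* major axis x */
      face = rx >= 0.0f ? PIPE_TEX_FACE_POS_X : PIPE_TEX_FACE_NEG_X;
      sc = rx >= 0.0f ? -rz : rz;
      tc = -ry;
      ma = ax;
   }
   else {                               /* major axis y */
      face = ry >= 0.0f ? PIPE_TEX_FACE_POS_Y : PIPE_TEX_FACE_NEG_Y;
      sc = rx;
      tc = ry >= 0.0f ? rz : -rz;
      ma = ay;
   }
   /* ima = 0.5 / |ma|, then s/t = sc/tc * ima + 0.5, as in the code below */
   *out_s = 0.5f * sc / ma + 0.5f;
   *out_t = 0.5f * tc / ma + 0.5f;
   return face;
}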
1484
1485
1486
1487 /**
1488 * Generate code to do cube face selection and compute per-face texcoords.
1489 */
1490 void
1491 lp_build_cube_lookup(struct lp_build_sample_context *bld,
1492 LLVMValueRef *coords,
1493 const struct lp_derivatives *derivs, /* optional */
1494 LLVMValueRef *rho,
1495 boolean need_derivs)
1496 {
1497 struct lp_build_context *coord_bld = &bld->coord_bld;
1498 LLVMBuilderRef builder = bld->gallivm->builder;
1499 struct gallivm_state *gallivm = bld->gallivm;
1500 LLVMValueRef si, ti, ri;
1501
1502 if (1 || coord_bld->type.length > 4) {
1503 /*
1504 * Do per-pixel face selection. We cannot however (as we used to do)
1505 * simply calculate the derivs afterwards (which is very bogus for
1506 * explicit derivs btw) because the values would be "random" when
1507 * not all pixels lie on the same face. So what we do here is just
1508 * calculate the derivatives after scaling the coords by the absolute
1509 * value of the inverse major axis, and essentially do rho calculation
1510 * steps as if it were a 3d texture. This is perfect if all pixels hit
1511 * the same face, but not so great at edges, I believe the max error
1512 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1513 * the 3d distance between 2 points on the cube instead of measuring up/down
1514 * the edge). Still this is possibly a win over just selecting the same face
1515 * for all pixels. Unfortunately, something like that doesn't work for
1516 * explicit derivatives.
1517 * TODO: handle explicit derivatives by transforming them alongside coords
1518 * somehow.
1519 */
1520 struct lp_build_context *cint_bld = &bld->int_coord_bld;
1521 struct lp_type intctype = cint_bld->type;
1522 LLVMValueRef signs, signt, signr, signma;
1523 LLVMValueRef as, at, ar, face, face_s, face_t;
1524 LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
1525 LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1526 LLVMValueRef tnegi, rnegi;
1527 LLVMValueRef ma, mai, ima;
1528 LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1529 LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1530 1 << (intctype.width - 1));
1531 LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1532 intctype.width -1);
1533 LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1534 LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1535 LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1536 LLVMValueRef s = coords[0];
1537 LLVMValueRef t = coords[1];
1538 LLVMValueRef r = coords[2];
1539
1540 assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1541 assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1542 assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1543
1544 /*
1545 * get absolute value (for x/y/z face selection) and sign bit
1546 * (for mirroring minor coords and pos/neg face selection)
1547 * of the original coords.
1548 */
1549 as = lp_build_abs(&bld->coord_bld, s);
1550 at = lp_build_abs(&bld->coord_bld, t);
1551 ar = lp_build_abs(&bld->coord_bld, r);
1552
1553 /*
1554 * major face determination: select x if x > y else select y,
1555 * then select z if z >= max(x,y) else select the previous result.
1556 * If some axes are equal we choose z over y, y over x - the
1557 * dx10 spec seems to ask for this while OpenGL doesn't care (if we
1558 * didn't care we could save a select or two by using different
1559 * compares and doing at_g_as_ar last, since tnewx and tnewz are the
1560 * same).
1561 */
1562 as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
1563 maxasat = lp_build_max(coord_bld, as, at);
1564 ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
1565
1566 if (need_derivs) {
1567 LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
1568 static const unsigned char swizzle0[] = { /* no-op swizzle */
1569 0, LP_BLD_SWIZZLE_DONTCARE,
1570 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1571 };
1572 static const unsigned char swizzle1[] = {
1573 1, LP_BLD_SWIZZLE_DONTCARE,
1574 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1575 };
1576 static const unsigned char swizzle01[] = { /* no-op swizzle */
1577 0, 1,
1578 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1579 };
1580 static const unsigned char swizzle23[] = {
1581 2, 3,
1582 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1583 };
1584 static const unsigned char swizzle02[] = {
1585 0, 2,
1586 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1587 };
1588
1589 /*
1590 * scale the s/t/r coords pre-select/mirror so we can calculate
1591 * "reasonable" derivs.
1592 */
1593 ma = lp_build_select(coord_bld, as_ge_at, s, t);
1594 ma = lp_build_select(coord_bld, ar_ge_as_at, r, ma);
1595 ima = lp_build_cube_imapos(coord_bld, ma);
1596 s = lp_build_mul(coord_bld, s, ima);
1597 t = lp_build_mul(coord_bld, t, ima);
1598 r = lp_build_mul(coord_bld, r, ima);
1599
1600 /*
1601 * This isn't quite the same as the "ordinary" (3d deriv) path since we
1602 * know the texture is square, which simplifies things (we can completely
1603 * omit the size mul, which otherwise happens very early, and do it at the
1604 * very end).
1605 */
1606 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
1607 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
1608
1609 if (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) {
1610 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
1611 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
1612 }
1613 else {
1614 ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
1615 ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
1616 }
1617
1618 tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
1619 tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
1620 tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
1621
1622 if (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) {
1623 rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
1624 rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);
1625 }
1626 else {
1627 rho_vec = lp_build_max(coord_bld, tmp[0], tmp[1]);
1628 rho_vec = lp_build_max(coord_bld, rho_vec, tmp[2]);
1629 }
1630
1631 tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
1632 tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
1633 *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
1634 }
1635
1636 si = LLVMBuildBitCast(builder, s, lp_build_vec_type(gallivm, intctype), "");
1637 ti = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
1638 ri = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
1639 signs = LLVMBuildAnd(builder, si, signmask, "");
1640 signt = LLVMBuildAnd(builder, ti, signmask, "");
1641 signr = LLVMBuildAnd(builder, ri, signmask, "");
1642
1643 /*
1644 * compute all possible new s/t coords
1645 * snewx = signs * -r;
1646 * tnewx = -t;
1647 * snewy = s;
1648 * tnewy = signt * r;
1649 * snewz = signr * s;
1650 * tnewz = -t;
1651 */
1652 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1653 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1654
1655 snewx = LLVMBuildXor(builder, signs, rnegi, "");
1656 tnewx = tnegi;
1657
1658 snewy = si;
1659 tnewy = LLVMBuildXor(builder, signt, ri, "");
1660
1661 snewz = LLVMBuildXor(builder, signr, si, "");
1662 tnewz = tnegi;
1663
1664 /* XXX on x86 it's unclear whether we should cast the values back to float
1665 * or not - on some cpus (nehalem) pblendvb has twice the throughput
1666 * of blendvps, though on others there just might be domain
1667 * transition penalties when using it (this depends on what llvm
1668 * will choose for the bit ops above, so there appears to be no "right way",
1669 * but given the boatload of selects let's just use the int type).
1670 */
1671
1672 /* select/mirror */
1673 if (!need_derivs) {
1674 ma = lp_build_select(coord_bld, as_ge_at, s, t);
1675 }
1676 face_s = lp_build_select(cint_bld, as_ge_at, snewx, snewy);
1677 face_t = lp_build_select(cint_bld, as_ge_at, tnewx, tnewy);
1678 face = lp_build_select(cint_bld, as_ge_at, facex, facey);
1679
1680 if (!need_derivs) {
1681 ma = lp_build_select(coord_bld, ar_ge_as_at, r, ma);
1682 }
1683 face_s = lp_build_select(cint_bld, ar_ge_as_at, snewz, face_s);
1684 face_t = lp_build_select(cint_bld, ar_ge_as_at, tnewz, face_t);
1685 face = lp_build_select(cint_bld, ar_ge_as_at, facez, face);
1686
1687 face_s = LLVMBuildBitCast(builder, face_s,
1688 lp_build_vec_type(gallivm, coord_bld->type), "");
1689 face_t = LLVMBuildBitCast(builder, face_t,
1690 lp_build_vec_type(gallivm, coord_bld->type), "");
1691
1692 /* add +1 for neg face */
1693 /* XXX with AVX probably want to use another select here -
1694 * as long as we ensure vblendvps gets used we can actually
1695 * skip the comparison and just use sign as a "mask" directly.
1696 */
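/*
 * signma below is ma's sign bit shifted down to bit 0 (signshift should
 * be 31), i.e. 1 for a negative major axis and 0 otherwise; or'ing it
 * into the face index turns PIPE_TEX_FACE_POS_x into PIPE_TEX_FACE_NEG_x
 * since the NEG face enums are the POS ones + 1.
 */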
1697 mai = LLVMBuildBitCast(builder, ma, lp_build_vec_type(gallivm, intctype), "");
1698 signma = LLVMBuildLShr(builder, mai, signshift, "");
1699 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1700
1701 /* project coords */
1702 if (!need_derivs) {
1703 ima = lp_build_cube_imapos(coord_bld, ma);
1704 face_s = lp_build_mul(coord_bld, face_s, ima);
1705 face_t = lp_build_mul(coord_bld, face_t, ima);
1706 }
1707
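/*
 * face_s/face_t are in [-0.5, 0.5] at this point; adding 0.5 (posHalf)
 * maps them to the final [0, 1] face coords.
 */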
1708 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
1709 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
1710 }
1711
1712 else {
1713 struct lp_build_if_state if_ctx;
1714 LLVMValueRef face_s_var;
1715 LLVMValueRef face_t_var;
1716 LLVMValueRef face_var;
1717 LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
1718 LLVMValueRef shuffles[4];
1719 LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
1720 LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
1721 LLVMValueRef tmp[4], rxyz, arxyz;
1722 struct lp_build_context *float_bld = &bld->float_bld;
1723 LLVMValueRef s, t, r, face, face_s, face_t;
1724
1725 assert(bld->coord_bld.type.length == 4);
1726
1727 tmp[0] = s = coords[0];
1728 tmp[1] = t = coords[1];
1729 tmp[2] = r = coords[2];
1730 rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
1731 arxyz = lp_build_abs(&bld->coord_bld, rxyz);
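/*
 * This path selects a single face for all four pixels of the quad:
 * lp_build_hadd_partial4() presumably sums each coord vector across the
 * quad, so rxyz holds {sum(s), sum(t), sum(r)} and the face decision
 * below is based on the absolute values of these sums.
 */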
1732
1733 shuffles[0] = lp_build_const_int32(gallivm, 0);
1734 shuffles[1] = lp_build_const_int32(gallivm, 1);
1735 shuffles[2] = lp_build_const_int32(gallivm, 0);
1736 shuffles[3] = lp_build_const_int32(gallivm, 1);
1737 arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1738 shuffles[0] = lp_build_const_int32(gallivm, 1);
1739 shuffles[1] = lp_build_const_int32(gallivm, 0);
1740 shuffles[2] = lp_build_const_int32(gallivm, 2);
1741 shuffles[3] = lp_build_const_int32(gallivm, 2);
1742 aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1743 arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
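/*
 * One vector compare computes {|x|>=|y|, |y|>=|x|, |x|>=|z|, |y|>=|z|};
 * the two 2-element shuffles plus the and below reduce that to
 * {x is major axis, y is major axis}.
 */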
1744
1745 shuffles[0] = lp_build_const_int32(gallivm, 0);
1746 shuffles[1] = lp_build_const_int32(gallivm, 1);
1747 arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1748 LLVMConstVector(shuffles, 2), "");
1749 shuffles[0] = lp_build_const_int32(gallivm, 2);
1750 shuffles[1] = lp_build_const_int32(gallivm, 3);
1751 arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1752 LLVMConstVector(shuffles, 2), "");
1753 arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
1754
1755 arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1756 lp_build_const_int32(gallivm, 0), "");
1757 arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
1758 lp_build_const_int32(gallivm, 0), "");
1759 ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1760 lp_build_const_int32(gallivm, 1), "");
1761 ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
1762 lp_build_const_int32(gallivm, 0), "");
1763 face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
1764 face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
1765 face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
1766
1767 lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
1768 {
1769 /* +/- X face */
1770 LLVMValueRef sign, ima;
1771 si = LLVMBuildExtractElement(builder, rxyz,
1772 lp_build_const_int32(gallivm, 0), "");
1773
1774 sign = lp_build_sgn(float_bld, si);
1775 ima = lp_build_cube_imaneg(coord_bld, s);
1776 face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
1777 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1778 face = lp_build_cube_face(bld, si,
1779 PIPE_TEX_FACE_POS_X,
1780 PIPE_TEX_FACE_NEG_X);
1781 LLVMBuildStore(builder, face_s, face_s_var);
1782 LLVMBuildStore(builder, face_t, face_t_var);
1783 LLVMBuildStore(builder, face, face_var);
1784 }
1785 lp_build_else(&if_ctx);
1786 {
1787 struct lp_build_if_state if_ctx2;
1788
1789 lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
1790 {
1791 LLVMValueRef sign, ima;
1792 /* +/- Y face */
1793 ti = LLVMBuildExtractElement(builder, rxyz,
1794 lp_build_const_int32(gallivm, 1), "");
1795 sign = lp_build_sgn(float_bld, ti);
1796 ima = lp_build_cube_imaneg(coord_bld, t);
1797 face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
1798 face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
1799 face = lp_build_cube_face(bld, ti,
1800 PIPE_TEX_FACE_POS_Y,
1801 PIPE_TEX_FACE_NEG_Y);
1802 LLVMBuildStore(builder, face_s, face_s_var);
1803 LLVMBuildStore(builder, face_t, face_t_var);
1804 LLVMBuildStore(builder, face, face_var);
1805 }
1806 lp_build_else(&if_ctx2);
1807 {
1808 /* +/- Z face */
1809 LLVMValueRef sign, ima;
1810 ri = LLVMBuildExtractElement(builder, rxyz,
1811 lp_build_const_int32(gallivm, 2), "");
1812 sign = lp_build_sgn(float_bld, ri);
1813 ima = lp_build_cube_imaneg(coord_bld, r);
1814 face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
1815 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1816 face = lp_build_cube_face(bld, ri,
1817 PIPE_TEX_FACE_POS_Z,
1818 PIPE_TEX_FACE_NEG_Z);
1819 LLVMBuildStore(builder, face_s, face_s_var);
1820 LLVMBuildStore(builder, face_t, face_t_var);
1821 LLVMBuildStore(builder, face, face_var);
1822 }
1823 lp_build_endif(&if_ctx2);
1824 }
1825
1826 lp_build_endif(&if_ctx);
1827
1828 coords[0] = LLVMBuildLoad(builder, face_s_var, "face_s");
1829 coords[1] = LLVMBuildLoad(builder, face_t_var, "face_t");
1830 face = LLVMBuildLoad(builder, face_var, "face");
1831 coords[2] = lp_build_broadcast_scalar(&bld->int_coord_bld, face);
1832 }
1833 }
1834
1835
1836 /**
1837 * Compute the partial offset of a pixel block along an arbitrary axis.
1838 *
1839 * @param coord coordinate in pixels
1840 * @param stride number of bytes between successive pixel blocks along the axis
1841 * @param block_length number of pixels in a pixel block along the coordinate
1842 * axis
1843 * @param out_offset resulting relative offset of the pixel block in bytes
1844 * @param out_subcoord resulting sub-block pixel coordinate
1845 */
1846 void
1847 lp_build_sample_partial_offset(struct lp_build_context *bld,
1848 unsigned block_length,
1849 LLVMValueRef coord,
1850 LLVMValueRef stride,
1851 LLVMValueRef *out_offset,
1852 LLVMValueRef *out_subcoord)
1853 {
1854 LLVMBuilderRef builder = bld->gallivm->builder;
1855 LLVMValueRef offset;
1856 LLVMValueRef subcoord;
1857
1858 if (block_length == 1) {
1859 subcoord = bld->zero;
1860 }
1861 else {
1862 /*
1863 * Pixel blocks have power of two dimensions, so in principle LLVM could
1864 * convert the rem/div below to bit arithmetic on its own.
1865 * Verified with llvm 2.7: it does indeed, BUT it transforms the ops to
1866 * scalar (and back) when doing so (using roughly extract, shift/and,
1867 * mov, unpack). The generated code looks seriously unfunny and is quite
1868 * expensive, hence do the bit arithmetic manually below.
1869 */
1870 #if 0
1871 LLVMValueRef block_width = lp_build_const_int_vec(bld->gallivm, bld->type, block_length);
1872 subcoord = LLVMBuildURem(builder, coord, block_width, "");
1873 coord = LLVMBuildUDiv(builder, coord, block_width, "");
1874 #else
1875 unsigned logbase2 = util_logbase2(block_length);
1876 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
1877 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
1878 subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
1879 coord = LLVMBuildLShr(builder, coord, block_shift, "");
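/* E.g. for a 4 pixel wide block (s3tc-style) and coord 13 this yields
 * subcoord = 13 & 3 = 1 and block coord = 13 >> 2 = 3.
 */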
1880 #endif
1881 }
1882
1883 offset = lp_build_mul(bld, coord, stride);
1884
1885 assert(out_offset);
1886 assert(out_subcoord);
1887
1888 *out_offset = offset;
1889 *out_subcoord = subcoord;
1890 }
1891
1892
1893 /**
1894 * Compute the offset of a pixel block.
1895 *
1896 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
1897 *
1898 * Returns the relative offset and i,j sub-block coordinates
1899 */
1900 void
1901 lp_build_sample_offset(struct lp_build_context *bld,
1902 const struct util_format_description *format_desc,
1903 LLVMValueRef x,
1904 LLVMValueRef y,
1905 LLVMValueRef z,
1906 LLVMValueRef y_stride,
1907 LLVMValueRef z_stride,
1908 LLVMValueRef *out_offset,
1909 LLVMValueRef *out_i,
1910 LLVMValueRef *out_j)
1911 {
1912 LLVMValueRef x_stride;
1913 LLVMValueRef offset;
1914
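/* x_stride is the size of one pixel block in bytes, i.e. the distance
 * between horizontally adjacent blocks */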
1915 x_stride = lp_build_const_vec(bld->gallivm, bld->type,
1916 format_desc->block.bits/8);
1917
1918 lp_build_sample_partial_offset(bld,
1919 format_desc->block.width,
1920 x, x_stride,
1921 &offset, out_i);
1922
1923 if (y && y_stride) {
1924 LLVMValueRef y_offset;
1925 lp_build_sample_partial_offset(bld,
1926 format_desc->block.height,
1927 y, y_stride,
1928 &y_offset, out_j);
1929 offset = lp_build_add(bld, offset, y_offset);
1930 }
1931 else {
1932 *out_j = bld->zero;
1933 }
1934
1935 if (z && z_stride) {
1936 LLVMValueRef z_offset;
1937 LLVMValueRef k;
1938 lp_build_sample_partial_offset(bld,
1939 1, /* pixel blocks are always 2D */
1940 z, z_stride,
1941 &z_offset, &k);
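/* (k is necessarily zero here as the block length is 1, hence there's
 * no out_k parameter) */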
1942 offset = lp_build_add(bld, offset, z_offset);
1943 }
1944
1945 *out_offset = offset;
1946 }