src/gallium/auxiliary/gallivm/lp_bld_sample.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- common code.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35 #include "pipe/p_defines.h"
36 #include "pipe/p_state.h"
37 #include "util/u_format.h"
38 #include "util/u_math.h"
39 #include "lp_bld_arit.h"
40 #include "lp_bld_const.h"
41 #include "lp_bld_debug.h"
42 #include "lp_bld_printf.h"
43 #include "lp_bld_flow.h"
44 #include "lp_bld_sample.h"
45 #include "lp_bld_swizzle.h"
46 #include "lp_bld_type.h"
47 #include "lp_bld_logic.h"
48 #include "lp_bld_pack.h"
49 #include "lp_bld_quad.h"
50 #include "lp_bld_bitarit.h"
51
52
53 /*
54 * Bri-linear factor. Should be greater than one.
55 */
56 #define BRILINEAR_FACTOR 2
57
58 /**
59 * Does the given texture wrap mode allow sampling the texture border color?
60 * XXX maybe move this into gallium util code.
61 */
62 boolean
63 lp_sampler_wrap_mode_uses_border_color(unsigned mode,
64 unsigned min_img_filter,
65 unsigned mag_img_filter)
66 {
67 switch (mode) {
68 case PIPE_TEX_WRAP_REPEAT:
69 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
70 case PIPE_TEX_WRAP_MIRROR_REPEAT:
71 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
72 return FALSE;
73 case PIPE_TEX_WRAP_CLAMP:
74 case PIPE_TEX_WRAP_MIRROR_CLAMP:
75 if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
76 mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
77 return FALSE;
78 } else {
79 return TRUE;
80 }
81 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
82 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
83 return TRUE;
84 default:
85 assert(0 && "unexpected wrap mode");
86 return FALSE;
87 }
88 }
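
/*
 * Usage sketch (the helper name below is hypothetical, not part of this
 * file): a driver building a shader key could use the function above to
 * check whether a sampler can ever fetch the border color, for any of its
 * three wrap modes:
 *
 *    static boolean
 *    sampler_needs_border_color(const struct pipe_sampler_state *sampler)
 *    {
 *       return lp_sampler_wrap_mode_uses_border_color(sampler->wrap_s,
 *                 sampler->min_img_filter, sampler->mag_img_filter) ||
 *              lp_sampler_wrap_mode_uses_border_color(sampler->wrap_t,
 *                 sampler->min_img_filter, sampler->mag_img_filter) ||
 *              lp_sampler_wrap_mode_uses_border_color(sampler->wrap_r,
 *                 sampler->min_img_filter, sampler->mag_img_filter);
 *    }
 */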
89
90
91 /**
92 * Initialize lp_static_texture_state object with the gallium
93 * texture/sampler_view state (this contains the parts which are
94 * considered static).
95 */
96 void
97 lp_sampler_static_texture_state(struct lp_static_texture_state *state,
98 const struct pipe_sampler_view *view)
99 {
100 const struct pipe_resource *texture;
101
102 memset(state, 0, sizeof *state);
103
104 if (!view || !view->texture)
105 return;
106
107 texture = view->texture;
108
109 state->format = view->format;
110 state->swizzle_r = view->swizzle_r;
111 state->swizzle_g = view->swizzle_g;
112 state->swizzle_b = view->swizzle_b;
113 state->swizzle_a = view->swizzle_a;
114
115 state->target = texture->target;
116 state->pot_width = util_is_power_of_two(texture->width0);
117 state->pot_height = util_is_power_of_two(texture->height0);
118 state->pot_depth = util_is_power_of_two(texture->depth0);
119 state->level_zero_only = !view->u.tex.last_level;
120
121 /*
122 * the layer / element / level parameters are all either dynamic
123 * state or handled transparently wrt execution.
124 */
125 }
126
127
128 /**
129 * Initialize lp_static_sampler_state object with the gallium sampler
130 * state (this contains the parts which are considered static).
131 */
132 void
133 lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
134 const struct pipe_sampler_state *sampler)
135 {
136 memset(state, 0, sizeof *state);
137
138 if (!sampler)
139 return;
140
141 /*
142 * We don't copy sampler state over unless it is actually enabled, to avoid
143 * spurious recompiles, as the sampler static state is part of the shader
144 * key.
145 *
146 * Ideally the state tracker or cso_cache module would make all state
147 * canonical, but until that happens it's better to be safe than sorry here.
148 *
149 * XXX: Actually there's much more that can be done here, especially
150 * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
151 */
152
153 state->wrap_s = sampler->wrap_s;
154 state->wrap_t = sampler->wrap_t;
155 state->wrap_r = sampler->wrap_r;
156 state->min_img_filter = sampler->min_img_filter;
157 state->mag_img_filter = sampler->mag_img_filter;
158
159 if (sampler->max_lod > 0.0f) {
160 state->min_mip_filter = sampler->min_mip_filter;
161 } else {
162 state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
163 }
164
165 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
166 if (sampler->lod_bias != 0.0f) {
167 state->lod_bias_non_zero = 1;
168 }
169
170 /* If min_lod == max_lod we can greatly simplify mipmap selection.
171 * This is a case that occurs during automatic mipmap generation.
172 */
173 if (sampler->min_lod == sampler->max_lod) {
174 state->min_max_lod_equal = 1;
175 } else {
176 if (sampler->min_lod > 0.0f) {
177 state->apply_min_lod = 1;
178 }
179
180 /*
181 * XXX this won't do anything with the mesa state tracker which always
182 * sets max_lod to not more than actually present mip maps...
183 */
184 if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
185 state->apply_max_lod = 1;
186 }
187 }
188 }
189
190 state->compare_mode = sampler->compare_mode;
191 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
192 state->compare_func = sampler->compare_func;
193 }
194
195 state->normalized_coords = sampler->normalized_coords;
196 }
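
/*
 * Usage sketch: both helpers above are typically run once per
 * texture/sampler unit when building a shader variant key. The key/ctx
 * field names below are illustrative, not actual llvmpipe names:
 *
 *    for (i = 0; i < num_units; i++) {
 *       lp_sampler_static_texture_state(&key->state[i].texture_state,
 *                                       ctx->sampler_views[i]);
 *       lp_sampler_static_sampler_state(&key->state[i].sampler_state,
 *                                       ctx->samplers[i]);
 *    }
 */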
197
198
199 /**
200 * Generate code to compute coordinate gradient (rho).
201 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
202 *
203 * The resulting rho is scalar per quad.
204 */
205 static LLVMValueRef
206 lp_build_rho(struct lp_build_sample_context *bld,
207 unsigned texture_unit,
208 LLVMValueRef s,
209 LLVMValueRef t,
210 LLVMValueRef r,
211 LLVMValueRef cube_rho,
212 const struct lp_derivatives *derivs)
213 {
214 struct gallivm_state *gallivm = bld->gallivm;
215 struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
216 struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
217 struct lp_build_context *float_bld = &bld->float_bld;
218 struct lp_build_context *coord_bld = &bld->coord_bld;
219 struct lp_build_context *levelf_bld = &bld->levelf_bld;
220 const unsigned dims = bld->dims;
221 LLVMValueRef ddx_ddy[2];
222 LLVMBuilderRef builder = bld->gallivm->builder;
223 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
224 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
225 LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
226 LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
227 LLVMValueRef rho_vec;
228 LLVMValueRef int_size, float_size;
229 LLVMValueRef rho;
230 LLVMValueRef first_level, first_level_vec;
231 unsigned length = coord_bld->type.length;
232 unsigned num_quads = length / 4;
233 unsigned i;
234 LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
235 LLVMValueRef rho_xvec, rho_yvec;
236
237 /* Note that all simplified calculations will only work for isotropic filtering */
238
239 assert(bld->num_lods != length);
240
241 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
242 bld->gallivm, texture_unit);
243 first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
244 int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
245 float_size = lp_build_int_to_float(float_size_bld, int_size);
246
247 if (cube_rho) {
248 LLVMValueRef cubesize;
249 LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);
250 /*
251 * The cube map code already did everything except the size mul and per-quad extraction.
252 */
253 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
254 levelf_bld->type, cube_rho, 0);
255 if (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) {
256 rho = lp_build_sqrt(levelf_bld, rho);
257 }
258 /* Could optimize this for the single quad case - just skip the broadcast */
259 cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
260 levelf_bld->type, float_size, index0);
261 rho = lp_build_mul(levelf_bld, cubesize, rho);
262 }
263 else if (derivs && !(bld->static_texture_state->target == PIPE_TEXTURE_CUBE)) {
264 LLVMValueRef ddmax[3], ddx[3], ddy[3];
265 for (i = 0; i < dims; i++) {
266 LLVMValueRef floatdim;
267 LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
268
269 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
270 coord_bld->type, float_size, indexi);
271
272 if ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1)) {
273 ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
274 ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
275 ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
276 ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
277 }
278 else {
279 LLVMValueRef tmpx, tmpy;
280 tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
281 tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
282 ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
283 ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
284 }
285 }
286 if ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1)) {
287 rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
288 rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
289 if (dims > 2) {
290 rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
291 rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
292 }
293 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
294 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
295 levelf_bld->type, rho_vec, 0);
296 /*
297 * Note that as long as we don't care about per-pixel lod we could reduce
298 * the math further (at some shuffle cost), but for now only do the sqrt after packing.
299 */
300 rho = lp_build_sqrt(levelf_bld, rho);
301 }
302 else {
303 rho_vec = ddmax[0];
304 if (dims > 1) {
305 rho_vec = lp_build_max(coord_bld, rho_vec, ddmax[1]);
306 if (dims > 2) {
307 rho_vec = lp_build_max(coord_bld, rho_vec, ddmax[2]);
308 }
309 }
310 /*
311 * rho_vec here still contains per-pixel rho; convert to scalar per quad
312 * since we can't handle per-pixel rho/lod from now on (TODO).
313 */
314 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
315 levelf_bld->type, rho_vec, 0);
316 }
317 }
318 else {
319 /*
320 * This all looks a bit complex, but it's not that bad
321 * (the shuffle code makes it look worse than it is).
322 * Still, might not be ideal for all cases.
323 */
324 static const unsigned char swizzle0[] = { /* no-op swizzle */
325 0, LP_BLD_SWIZZLE_DONTCARE,
326 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
327 };
328 static const unsigned char swizzle1[] = {
329 1, LP_BLD_SWIZZLE_DONTCARE,
330 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
331 };
332 static const unsigned char swizzle2[] = {
333 2, LP_BLD_SWIZZLE_DONTCARE,
334 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
335 };
336
337 if (dims < 2) {
338 ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
339 }
340 else if (dims >= 2) {
341 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
342 if (dims > 2) {
343 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
344 }
345 }
346
347 if ((gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) && (dims > 1)) {
348 static const unsigned char swizzle01[] = { /* no-op swizzle */
349 0, 1,
350 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
351 };
352 static const unsigned char swizzle23[] = {
353 2, 3,
354 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
355 };
356 LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];
357
358 for (i = 0; i < num_quads; i++) {
359 shuffles[i*4+0] = shuffles[i*4+1] = index0;
360 shuffles[i*4+2] = shuffles[i*4+3] = index1;
361 }
362 floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
363 LLVMConstVector(shuffles, length), "");
364 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
365 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
366 ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
367 ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
368 rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);
369
370 if (dims > 2) {
371 static const unsigned char swizzle02[] = {
372 0, 2,
373 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
374 };
375 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
376 coord_bld->type, float_size, index2);
377 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
378 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
379 ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
380 rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
381 }
382 rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
383 rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
384 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
385
386 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
387 levelf_bld->type, rho_vec, 0);
388 rho = lp_build_sqrt(levelf_bld, rho);
389 }
390 else {
391 ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
392 if (dims > 2) {
393 ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
394 }
395
396 if (dims < 2) {
397 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
398 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle1);
399 }
400 else if (dims == 2) {
401 static const unsigned char swizzle02[] = {
402 0, 2,
403 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
404 };
405 static const unsigned char swizzle13[] = {
406 1, 3,
407 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
408 };
409 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
410 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
411 }
412 else {
413 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
414 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
415 assert(dims == 3);
416 for (i = 0; i < num_quads; i++) {
417 shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
418 shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
419 shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
420 shuffles1[4*i + 3] = i32undef;
421 shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
422 shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
423 shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
424 shuffles2[4*i + 3] = i32undef;
425 }
426 rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
427 LLVMConstVector(shuffles1, length), "");
428 rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
429 LLVMConstVector(shuffles2, length), "");
430 }
431
432 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
433
434 if (bld->coord_type.length > 4) {
435 /* expand size to each quad */
436 if (dims > 1) {
437 /* could use some broadcast_vector helper for this? */
438 LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
439 for (i = 0; i < num_quads; i++) {
440 src[i] = float_size;
441 }
442 float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
443 }
444 else {
445 float_size = lp_build_broadcast_scalar(coord_bld, float_size);
446 }
447 rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);
448
449 if (dims <= 1) {
450 rho = rho_vec;
451 }
452 else {
453 if (dims >= 2) {
454 LLVMValueRef rho_s, rho_t, rho_r;
455
456 rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
457 rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
458
459 rho = lp_build_max(coord_bld, rho_s, rho_t);
460
461 if (dims >= 3) {
462 rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
463 rho = lp_build_max(coord_bld, rho, rho_r);
464 }
465 }
466 }
467 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
468 levelf_bld->type, rho, 0);
469 }
470 else {
471 if (dims <= 1) {
472 rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
473 }
474 rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);
475
476 if (dims <= 1) {
477 rho = rho_vec;
478 }
479 else {
480 if (dims >= 2) {
481 LLVMValueRef rho_s, rho_t, rho_r;
482
483 rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
484 rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");
485
486 rho = lp_build_max(float_bld, rho_s, rho_t);
487
488 if (dims >= 3) {
489 rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
490 rho = lp_build_max(float_bld, rho, rho_r);
491 }
492 }
493 }
494 }
495 }
496 }
497
498 return rho;
499 }
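
/*
 * For reference, the scalar value computed above is, with the default
 * approximation:
 *
 *    rho = max_i(size_i * max(|d(coord_i)/dx|, |d(coord_i)/dy|))
 *
 * and with GALLIVM_DEBUG_NO_RHO_APPROX the exact:
 *
 *    rho = max(sqrt(sum_i (size_i * d(coord_i)/dx)^2),
 *              sqrt(sum_i (size_i * d(coord_i)/dy)^2))
 */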
500
501
502 /*
503 * Bri-linear lod computation
504 *
505 * Use a piece-wise linear approximation of log2 such that:
506 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
507 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
508 * with the steepness specified in 'factor'
509 * - exact result for 0.5, 1.5, etc.
510 *
511 *
512 * 1.0 - /----*
513 * /
514 * /
515 * /
516 * 0.5 - *
517 * /
518 * /
519 * /
520 * 0.0 - *----/
521 *
522 * | |
523 * 2^0 2^1
524 *
525 * This is a technique also commonly used in hardware:
526 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
527 *
528 * TODO: For correctness, this should only be applied when the texture is known to
529 * have regular mipmaps, i.e., mipmaps derived from the base level.
530 *
531 * TODO: This could be done in fixed point, where applicable.
532 */
533 static void
534 lp_build_brilinear_lod(struct lp_build_context *bld,
535 LLVMValueRef lod,
536 double factor,
537 LLVMValueRef *out_lod_ipart,
538 LLVMValueRef *out_lod_fpart)
539 {
540 LLVMValueRef lod_fpart;
541 double pre_offset = (factor - 0.5)/factor - 0.5;
542 double post_offset = 1 - factor;
543
544 if (0) {
545 lp_build_printf(bld->gallivm, "lod = %f\n", lod);
546 }
547
548 lod = lp_build_add(bld, lod,
549 lp_build_const_vec(bld->gallivm, bld->type, pre_offset));
550
551 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);
552
553 lod_fpart = lp_build_mul(bld, lod_fpart,
554 lp_build_const_vec(bld->gallivm, bld->type, factor));
555
556 lod_fpart = lp_build_add(bld, lod_fpart,
557 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
558
559 /*
560 * It's not necessary to clamp lod_fpart since:
561 * - the above expression will never produce numbers greater than one.
562 * - the mip filtering branch is only taken if lod_fpart is positive
563 */
564
565 *out_lod_fpart = lod_fpart;
566
567 if (0) {
568 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
569 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
570 }
571 }
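
/*
 * Worked example with factor = BRILINEAR_FACTOR = 2, i.e. pre_offset = 0.25
 * and post_offset = -1:
 *
 *    lod = 0.0  -> lod_ipart = 0, lod_fpart = -0.5  (negative: level 0 only)
 *    lod = 0.5  -> lod_ipart = 0, lod_fpart =  0.5  (exact, as stated above)
 *    lod = 0.75 -> lod_ipart = 1, lod_fpart = -1.0  (snapped to level 1)
 *
 * So only lods roughly in the middle half between two levels actually take
 * the two-level interpolation path.
 */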
572
573
574 /*
575 * Combined log2 and brilinear lod computation.
576 *
577 * It's identical to calling lp_build_fast_log2() and
578 * lp_build_brilinear_lod() above, but by combining the two we can compute
579 * the integer and fractional parts independently.
580 */
581 static void
582 lp_build_brilinear_rho(struct lp_build_context *bld,
583 LLVMValueRef rho,
584 double factor,
585 LLVMValueRef *out_lod_ipart,
586 LLVMValueRef *out_lod_fpart)
587 {
588 LLVMValueRef lod_ipart;
589 LLVMValueRef lod_fpart;
590
591 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
592 const double post_offset = 1 - 2*factor;
593
594 assert(bld->type.floating);
595
596 assert(lp_check_value(bld->type, rho));
597
598 /*
599 * The pre factor will make the intersections with the exact powers of two
600 * happen precisely where we want them to be, which means that the integer
601 * part will not need any post adjustments.
602 */
603 rho = lp_build_mul(bld, rho,
604 lp_build_const_vec(bld->gallivm, bld->type, pre_factor));
605
606 /* ipart = ifloor(log2(rho)) */
607 lod_ipart = lp_build_extract_exponent(bld, rho, 0);
608
609 /* fpart = rho / 2**ipart */
610 lod_fpart = lp_build_extract_mantissa(bld, rho);
611
612 lod_fpart = lp_build_mul(bld, lod_fpart,
613 lp_build_const_vec(bld->gallivm, bld->type, factor));
614
615 lod_fpart = lp_build_add(bld, lod_fpart,
616 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
617
618 /*
619 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
620 * - the above expression will never produce numbers greater than one.
621 * - the mip filtering branch is only taken if lod_fpart is positive
622 */
623
624 *out_lod_ipart = lod_ipart;
625 *out_lod_fpart = lod_fpart;
626 }
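
/*
 * In other words, with rho' = pre_factor * rho decomposed as
 * rho' = 2^ipart * m, m in [1, 2), the code above computes:
 *
 *    lod_ipart = ipart
 *    lod_fpart = m * factor + (1 - 2*factor)
 *
 * which by construction of pre_factor agrees with running
 * lp_build_fast_log2() followed by lp_build_brilinear_lod().
 */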
627
628
629 /**
630 * Generate code to compute texture level of detail (lambda).
631 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
632 * \param lod_bias optional float vector with the shader lod bias
633 * \param explicit_lod optional float vector with the explicit lod
634 * \param width scalar int texture width
635 * \param height scalar int texture height
636 * \param depth scalar int texture depth
637 *
638 * The resulting lod is scalar per quad, so only the first value per quad
639 * passed in via lod_bias or explicit_lod is used.
640 */
641 void
642 lp_build_lod_selector(struct lp_build_sample_context *bld,
643 unsigned texture_unit,
644 unsigned sampler_unit,
645 LLVMValueRef s,
646 LLVMValueRef t,
647 LLVMValueRef r,
648 LLVMValueRef cube_rho,
649 const struct lp_derivatives *derivs,
650 LLVMValueRef lod_bias, /* optional */
651 LLVMValueRef explicit_lod, /* optional */
652 unsigned mip_filter,
653 LLVMValueRef *out_lod_ipart,
654 LLVMValueRef *out_lod_fpart)
655
656 {
657 LLVMBuilderRef builder = bld->gallivm->builder;
658 struct lp_build_context *levelf_bld = &bld->levelf_bld;
659 LLVMValueRef lod;
660
661 *out_lod_ipart = bld->leveli_bld.zero;
662 *out_lod_fpart = levelf_bld->zero;
663
664 if (bld->static_sampler_state->min_max_lod_equal) {
665 /* User is forcing sampling from a particular mipmap level.
666 * This is hit during mipmap generation.
667 */
668 LLVMValueRef min_lod =
669 bld->dynamic_state->min_lod(bld->dynamic_state,
670 bld->gallivm, sampler_unit);
671
672 lod = lp_build_broadcast_scalar(levelf_bld, min_lod);
673 }
674 else {
675 if (explicit_lod) {
676 if (bld->num_lods != bld->coord_type.length)
677 lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
678 levelf_bld->type, explicit_lod, 0);
679 else
680 lod = explicit_lod;
681 }
682 else {
683 LLVMValueRef rho;
684
685 rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);
686
687 /*
688 * Compute lod = log2(rho)
689 */
690
691 if (!lod_bias &&
692 !bld->static_sampler_state->lod_bias_non_zero &&
693 !bld->static_sampler_state->apply_max_lod &&
694 !bld->static_sampler_state->apply_min_lod) {
695 /*
696 * Special case when there are no post-log2 adjustments, which
697 * saves instructions by keeping the integer and fractional lod
698 * computations separate from the start.
699 */
700
701 if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
702 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
703 *out_lod_ipart = lp_build_ilog2(levelf_bld, rho);
704 *out_lod_fpart = levelf_bld->zero;
705 return;
706 }
707 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
708 !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
709 lp_build_brilinear_rho(levelf_bld, rho, BRILINEAR_FACTOR,
710 out_lod_ipart, out_lod_fpart);
711 return;
712 }
713 }
714
715 if (0) {
716 lod = lp_build_log2(levelf_bld, rho);
717 }
718 else {
719 lod = lp_build_fast_log2(levelf_bld, rho);
720 }
721
722 /* add shader lod bias */
723 if (lod_bias) {
724 lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
725 levelf_bld->type, lod_bias, 0);
726 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
727 }
728 }
729
730 /* add sampler lod bias */
731 if (bld->static_sampler_state->lod_bias_non_zero) {
732 LLVMValueRef sampler_lod_bias =
733 bld->dynamic_state->lod_bias(bld->dynamic_state,
734 bld->gallivm, sampler_unit);
735 sampler_lod_bias = lp_build_broadcast_scalar(levelf_bld,
736 sampler_lod_bias);
737 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
738 }
739
740 /* clamp lod */
741 if (bld->static_sampler_state->apply_max_lod) {
742 LLVMValueRef max_lod =
743 bld->dynamic_state->max_lod(bld->dynamic_state,
744 bld->gallivm, sampler_unit);
745 max_lod = lp_build_broadcast_scalar(levelf_bld, max_lod);
746
747 lod = lp_build_min(levelf_bld, lod, max_lod);
748 }
749 if (bld->static_sampler_state->apply_min_lod) {
750 LLVMValueRef min_lod =
751 bld->dynamic_state->min_lod(bld->dynamic_state,
752 bld->gallivm, sampler_unit);
753 min_lod = lp_build_broadcast_scalar(levelf_bld, min_lod);
754
755 lod = lp_build_max(levelf_bld, lod, min_lod);
756 }
757 }
758
759 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
760 if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
761 lp_build_brilinear_lod(levelf_bld, lod, BRILINEAR_FACTOR,
762 out_lod_ipart, out_lod_fpart);
763 }
764 else {
765 lp_build_ifloor_fract(levelf_bld, lod, out_lod_ipart, out_lod_fpart);
766 }
767
768 lp_build_name(*out_lod_fpart, "lod_fpart");
769 }
770 else {
771 *out_lod_ipart = lp_build_iround(levelf_bld, lod);
772 }
773
774 lp_build_name(*out_lod_ipart, "lod_ipart");
775
776 return;
777 }
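
/*
 * Summary of the general path above (special cases aside):
 *
 *    lod = clamp(log2(rho) + shader_lod_bias + sampler_lod_bias,
 *                min_lod, max_lod)
 *
 * which is then split into integer and fractional parts (brilinear or
 * exact, depending on GALLIVM_DEBUG_NO_BRILINEAR) for the mip filter.
 */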
778
779
780 /**
781 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
782 * to actual mip level.
783 * Note: this is all scalar per quad code.
784 * \param lod_ipart int texture level of detail
785 * \param level_out returns the integer mip level
786 * \param out_of_bounds returns per-coord out_of_bounds mask if provided
787 */
788 void
789 lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
790 unsigned texture_unit,
791 LLVMValueRef lod_ipart,
792 LLVMValueRef *level_out,
793 LLVMValueRef *out_of_bounds)
794 {
795 struct lp_build_context *leveli_bld = &bld->leveli_bld;
796 LLVMValueRef first_level, last_level, level;
797
798 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
799 bld->gallivm, texture_unit);
800 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
801 bld->gallivm, texture_unit);
802 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
803 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
804
805 level = lp_build_add(leveli_bld, lod_ipart, first_level);
806
807 if (out_of_bounds) {
808 LLVMValueRef out, out1;
809 out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
810 out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
811 out = lp_build_or(leveli_bld, out, out1);
812 if (bld->num_lods == bld->coord_bld.type.length) {
813 *out_of_bounds = out;
814 }
815 else if (bld->num_lods == 1) {
816 *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
817 }
818 else {
819 assert(bld->num_lods == bld->coord_bld.type.length / 4);
820 *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
821 leveli_bld->type,
822 bld->int_coord_bld.type,
823 out);
824 }
825 *level_out = level;
826 }
827 else {
828 /* clamp level to legal range of levels */
829 *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
830
831 }
832 }
833
834
835 /**
836 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad int LOD(s) to two (per-quad)
837 * (adjacent) mipmap level indexes, and fix up float lod part accordingly.
838 * Later, we'll sample from those two mipmap levels and interpolate between them.
839 */
840 void
841 lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
842 unsigned texture_unit,
843 LLVMValueRef lod_ipart,
844 LLVMValueRef *lod_fpart_inout,
845 LLVMValueRef *level0_out,
846 LLVMValueRef *level1_out)
847 {
848 LLVMBuilderRef builder = bld->gallivm->builder;
849 struct lp_build_context *leveli_bld = &bld->leveli_bld;
850 struct lp_build_context *levelf_bld = &bld->levelf_bld;
851 LLVMValueRef first_level, last_level;
852 LLVMValueRef clamp_min;
853 LLVMValueRef clamp_max;
854
855 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
856 bld->gallivm, texture_unit);
857 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
858 bld->gallivm, texture_unit);
859 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
860 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
861
862 *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
863 *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);
864
865 /*
866 * Clamp both *level0_out and *level1_out to [first_level, last_level], with
867 * the minimum number of comparisons, and zeroing lod_fpart in the extreme
868 * ends in the process.
869 */
870
871 /*
872 * This code (vector select in particular) only works with llvm 3.1 or
873 * later (if there's more than one quad, with the x86 backend). Might consider
874 * converting to our lp_bld_logic helpers.
875 */
876 #if HAVE_LLVM < 0x0301
877 assert(leveli_bld->type.length == 1);
878 #endif
879
880 /* *level0_out < first_level */
881 clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
882 *level0_out, first_level,
883 "clamp_lod_to_first");
884
885 *level0_out = LLVMBuildSelect(builder, clamp_min,
886 first_level, *level0_out, "");
887
888 *level1_out = LLVMBuildSelect(builder, clamp_min,
889 first_level, *level1_out, "");
890
891 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
892 levelf_bld->zero, *lod_fpart_inout, "");
893
894 /* *level0_out >= last_level */
895 clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
896 *level0_out, last_level,
897 "clamp_lod_to_last");
898
899 *level0_out = LLVMBuildSelect(builder, clamp_max,
900 last_level, *level0_out, "");
901
902 *level1_out = LLVMBuildSelect(builder, clamp_max,
903 last_level, *level1_out, "");
904
905 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
906 levelf_bld->zero, *lod_fpart_inout, "");
907
908 lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
909 lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
910 lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
911 }
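
/*
 * Example: lod_ipart = -1 with first_level = 0 and last_level = 3 gives
 * level0 = -1, level1 = 0 before clamping; clamp_min then forces
 * level0 = level1 = 0 and lod_fpart = 0, i.e. plain level 0 sampling,
 * using just the two compares above.
 */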
912
913
914 /**
915 * Return pointer to a single mipmap level.
916 * \param level integer mipmap level
917 */
918 LLVMValueRef
919 lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
920 LLVMValueRef level)
921 {
922 LLVMBuilderRef builder = bld->gallivm->builder;
923 LLVMValueRef indexes[2], data_ptr, mip_offset;
924
925 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
926 indexes[1] = level;
927 mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
928 mip_offset = LLVMBuildLoad(builder, mip_offset, "");
929 data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
930 return data_ptr;
931 }
932
933 /**
934 * Return (per-pixel) offsets to mip levels.
935 * \param level integer mipmap level
936 */
937 LLVMValueRef
938 lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
939 LLVMValueRef level)
940 {
941 LLVMBuilderRef builder = bld->gallivm->builder;
942 LLVMValueRef indexes[2], offsets, offset1;
943
944 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
945 if (bld->num_lods == 1) {
946 indexes[1] = level;
947 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
948 offset1 = LLVMBuildLoad(builder, offset1, "");
949 offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
950 }
951 else if (bld->num_lods == bld->coord_bld.type.length / 4) {
952 unsigned i;
953
954 offsets = bld->int_coord_bld.undef;
955 for (i = 0; i < bld->num_lods; i++) {
956 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
957 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
958 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
959 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
960 offset1 = LLVMBuildLoad(builder, offset1, "");
961 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
962 }
963 offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
964 }
965 else {
966 unsigned i;
967
968 assert (bld->num_lods == bld->coord_bld.type.length);
969
970 offsets = bld->int_coord_bld.undef;
971 for (i = 0; i < bld->num_lods; i++) {
972 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
973 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
974 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
975 offset1 = LLVMBuildLoad(builder, offset1, "");
976 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
977 }
978 }
979 return offsets;
980 }
981
982
983 /**
984 * Codegen equivalent for u_minify().
985 * Return max(1, base_size >> level);
986 */
987 LLVMValueRef
988 lp_build_minify(struct lp_build_context *bld,
989 LLVMValueRef base_size,
990 LLVMValueRef level)
991 {
992 LLVMBuilderRef builder = bld->gallivm->builder;
993 assert(lp_check_value(bld->type, base_size));
994 assert(lp_check_value(bld->type, level));
995
996 if (level == bld->zero) {
997 /* if we're using mipmap level zero, no minification is needed */
998 return base_size;
999 }
1000 else {
1001 LLVMValueRef size =
1002 LLVMBuildLShr(builder, base_size, level, "minify");
1003 assert(bld->type.sign);
1004 size = lp_build_max(bld, size, bld->one);
1005 return size;
1006 }
1007 }
1008
1009
1010 /**
1011 * Dereference stride_array[mipmap_level] array to get a stride.
1012 * Return stride as a vector.
1013 */
1014 static LLVMValueRef
1015 lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
1016 LLVMValueRef stride_array, LLVMValueRef level)
1017 {
1018 LLVMBuilderRef builder = bld->gallivm->builder;
1019 LLVMValueRef indexes[2], stride, stride1;
1020 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1021 if (bld->num_lods == 1) {
1022 indexes[1] = level;
1023 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1024 stride1 = LLVMBuildLoad(builder, stride1, "");
1025 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
1026 }
1027 else if (bld->num_lods == bld->coord_bld.type.length / 4) {
1028 LLVMValueRef stride1;
1029 unsigned i;
1030
1031 stride = bld->int_coord_bld.undef;
1032 for (i = 0; i < bld->num_lods; i++) {
1033 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1034 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
1035 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1036 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1037 stride1 = LLVMBuildLoad(builder, stride1, "");
1038 stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
1039 }
1040 stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
1041 }
1042 else {
1043 LLVMValueRef stride1;
1044 unsigned i;
1045
1046 assert (bld->num_lods == bld->coord_bld.type.length);
1047
1048 stride = bld->int_coord_bld.undef;
1049 for (i = 0; i < bld->coord_bld.type.length; i++) {
1050 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1051 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1052 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1053 stride1 = LLVMBuildLoad(builder, stride1, "");
1054 stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
1055 }
1056 }
1057 return stride;
1058 }
1059
1060
1061 /**
1062 * When sampling a mipmap, we need to compute the width, height, depth
1063 * of the source levels from the level indexes. This helper function
1064 * does that.
1065 */
1066 void
1067 lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
1068 LLVMValueRef ilevel,
1069 LLVMValueRef *out_size,
1070 LLVMValueRef *row_stride_vec,
1071 LLVMValueRef *img_stride_vec)
1072 {
1073 const unsigned dims = bld->dims;
1074 LLVMValueRef ilevel_vec;
1075
1076 /*
1077 * Compute width, height, depth at mipmap level 'ilevel'
1078 */
1079 if (bld->num_lods == 1) {
1080 ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
1081 *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
1082 }
1083 else {
1084 LLVMValueRef int_size_vec;
1085 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
1086 unsigned num_quads = bld->coord_bld.type.length / 4;
1087 unsigned i;
1088
1089 if (bld->num_lods == num_quads) {
1090 /*
1091 * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
1092 * intel "forgot" the variable shift count instruction until avx2.
1093 * A harmless 8x32 shift gets translated into 32 instructions
1094 * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
1095 * unable to recognize if there are really just 2 different shift
1096 * count values. So do the shift 4-wide before expansion.
1097 */
1098 struct lp_build_context bld4;
1099 struct lp_type type4;
1100
1101 type4 = bld->int_coord_bld.type;
1102 type4.length = 4;
1103
1104 lp_build_context_init(&bld4, bld->gallivm, type4);
1105
1106 if (bld->dims == 1) {
1107 assert(bld->int_size_in_bld.type.length == 1);
1108 int_size_vec = lp_build_broadcast_scalar(&bld4,
1109 bld->int_size);
1110 }
1111 else {
1112 assert(bld->int_size_in_bld.type.length == 4);
1113 int_size_vec = bld->int_size;
1114 }
1115
1116 for (i = 0; i < num_quads; i++) {
1117 LLVMValueRef ileveli;
1118 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1119
1120 ileveli = lp_build_extract_broadcast(bld->gallivm,
1121 bld->leveli_bld.type,
1122 bld4.type,
1123 ilevel,
1124 indexi);
1125 tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
1126 }
1127 /*
1128 * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
1129 * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
1130 */
1131 *out_size = lp_build_concat(bld->gallivm,
1132 tmp,
1133 bld4.type,
1134 num_quads);
1135 }
1136 else {
1137 /* FIXME: this is terrible and results in _huge_ vector
1138 * (for the dims > 1 case).
1139 * Should refactor this (together with extract_image_sizes) and do
1140 * something more useful. Could, for instance, if we have width and height
1141 * in a 4-wide vector, pack all elements into an 8xi16 vector
1142 * (on which we can still do useful math) instead of using a 16xi32
1143 * vector.
1144 * FIXME: some callers can't handle this yet.
1145 * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
1146 * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
1147 */
1148 assert(bld->num_lods == bld->coord_bld.type.length);
1149 if (bld->dims == 1) {
1150 assert(bld->int_size_in_bld.type.length == 1);
1151 int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
1152 bld->int_size);
1153 /* vector shift with variable shift count alert... */
1154 *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
1155 }
1156 else {
1157 LLVMValueRef ilevel1;
1158 for (i = 0; i < bld->num_lods; i++) {
1159 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1160 ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
1161 bld->int_size_in_bld.type, ilevel, indexi);
1162 tmp[i] = bld->int_size;
1163 tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
1164 }
1165 *out_size = lp_build_concat(bld->gallivm, tmp,
1166 bld->int_size_in_bld.type,
1167 bld->num_lods);
1168 }
1169 }
1170 }
1171
1172 if (dims >= 2) {
1173 *row_stride_vec = lp_build_get_level_stride_vec(bld,
1174 bld->row_stride_array,
1175 ilevel);
1176 }
1177 if (dims == 3 ||
1178 bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
1179 bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
1180 bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
1181 *img_stride_vec = lp_build_get_level_stride_vec(bld,
1182 bld->img_stride_array,
1183 ilevel);
1184 }
1185 }
1186
1187
1188 /**
1189 * Extract and broadcast texture size.
1190 *
1191 * @param size_bld build context of the texture size vector (either
1192 * bld->int_size_bld or bld->float_size_bld)
1193 * @param coord_type type of the coordinate vector (either
1194 * bld->int_coord_type or bld->coord_type)
1195 * @param size vector with the texture size (width, height, depth)
1196 */
1197 void
1198 lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
1199 struct lp_build_context *size_bld,
1200 struct lp_type coord_type,
1201 LLVMValueRef size,
1202 LLVMValueRef *out_width,
1203 LLVMValueRef *out_height,
1204 LLVMValueRef *out_depth)
1205 {
1206 const unsigned dims = bld->dims;
1207 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1208 struct lp_type size_type = size_bld->type;
1209
1210 if (bld->num_lods == 1) {
1211 *out_width = lp_build_extract_broadcast(bld->gallivm,
1212 size_type,
1213 coord_type,
1214 size,
1215 LLVMConstInt(i32t, 0, 0));
1216 if (dims >= 2) {
1217 *out_height = lp_build_extract_broadcast(bld->gallivm,
1218 size_type,
1219 coord_type,
1220 size,
1221 LLVMConstInt(i32t, 1, 0));
1222 if (dims == 3) {
1223 *out_depth = lp_build_extract_broadcast(bld->gallivm,
1224 size_type,
1225 coord_type,
1226 size,
1227 LLVMConstInt(i32t, 2, 0));
1228 }
1229 }
1230 }
1231 else {
1232 unsigned num_quads = bld->coord_bld.type.length / 4;
1233
1234 if (dims == 1) {
1235 *out_width = size;
1236 }
1237 else if (bld->num_lods == num_quads) {
1238 *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
1239 if (dims >= 2) {
1240 *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
1241 if (dims == 3) {
1242 *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
1243 }
1244 }
1245 }
1246 else {
1247 assert(bld->num_lods == bld->coord_type.length);
1248 *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1249 coord_type, size, 0);
1250 if (dims >= 2) {
1251 *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1252 coord_type, size, 1);
1253 if (dims == 3) {
1254 *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1255 coord_type, size, 2);
1256 }
1257 }
1258 }
1259 }
1260 }
1261
1262
1263 /**
1264 * Unnormalize coords.
1265 *
1266 * @param flt_size vector with the float texture size (width, height, depth)
1267 */
1268 void
1269 lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
1270 LLVMValueRef flt_size,
1271 LLVMValueRef *s,
1272 LLVMValueRef *t,
1273 LLVMValueRef *r)
1274 {
1275 const unsigned dims = bld->dims;
1276 LLVMValueRef width;
1277 LLVMValueRef height;
1278 LLVMValueRef depth;
1279
1280 lp_build_extract_image_sizes(bld,
1281 &bld->float_size_bld,
1282 bld->coord_type,
1283 flt_size,
1284 &width,
1285 &height,
1286 &depth);
1287
1288 /* s = s * width, t = t * height */
1289 *s = lp_build_mul(&bld->coord_bld, *s, width);
1290 if (dims >= 2) {
1291 *t = lp_build_mul(&bld->coord_bld, *t, height);
1292 if (dims >= 3) {
1293 *r = lp_build_mul(&bld->coord_bld, *r, depth);
1294 }
1295 }
1296 }
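
/*
 * E.g. for a 256x128 2D texture, s = 0.5 and t = 0.25 become
 * s = 128.0 and t = 32.0.
 */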
1297
1298
1299 /** Helper used by lp_build_cube_lookup() */
1300 static LLVMValueRef
1301 lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
1302 {
1303 /* ima = +0.5 / abs(coord); */
1304 LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1305 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1306 LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
1307 return ima;
1308 }
1309
1310 /** Helper used by lp_build_cube_lookup() */
1311 static LLVMValueRef
1312 lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
1313 {
1314 /* ima = -0.5 / abs(coord); */
1315 LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
1316 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1317 LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
1318 return ima;
1319 }
1320
1321 /**
1322 * Helper used by lp_build_cube_lookup()
1323 * FIXME: the sign here can also be 0.
1324 * Arithmetically this could definitely make a difference. Either
1325 * fix the comment or use another (simpler) sign function, not sure
1326 * which one it should be.
1327 * \param sign scalar +1 or -1
1328 * \param coord float vector
1329 * \param ima float vector
1330 */
1331 static LLVMValueRef
1332 lp_build_cube_coord(struct lp_build_context *coord_bld,
1333 LLVMValueRef sign, int negate_coord,
1334 LLVMValueRef coord, LLVMValueRef ima)
1335 {
1336 /* return negate(coord) * ima * sign + 0.5; */
1337 LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1338 LLVMValueRef res;
1339
1340 assert(negate_coord == +1 || negate_coord == -1);
1341
1342 if (negate_coord == -1) {
1343 coord = lp_build_negate(coord_bld, coord);
1344 }
1345
1346 res = lp_build_mul(coord_bld, coord, ima);
1347 if (sign) {
1348 sign = lp_build_broadcast_scalar(coord_bld, sign);
1349 res = lp_build_mul(coord_bld, res, sign);
1350 }
1351 res = lp_build_add(coord_bld, res, half);
1352
1353 return res;
1354 }
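
/*
 * E.g. for the +X face (sign = +1, ima = -0.5 / |s|) the branchy path below
 * ends up with:
 *
 *    face_s = (-r / |s|) * 0.5 + 0.5
 *    face_t = (-t / |s|) * 0.5 + 0.5
 *
 * matching the usual cube map face orientation tables.
 */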
1355
1356
1357 /** Helper used by lp_build_cube_lookup()
1358 * Return (major_coord >= 0) ? pos_face : neg_face;
1359 */
1360 static LLVMValueRef
1361 lp_build_cube_face(struct lp_build_sample_context *bld,
1362 LLVMValueRef major_coord,
1363 unsigned pos_face, unsigned neg_face)
1364 {
1365 struct gallivm_state *gallivm = bld->gallivm;
1366 LLVMBuilderRef builder = gallivm->builder;
1367 LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
1368 major_coord,
1369 bld->float_bld.zero, "");
1370 LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
1371 LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
1372 LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
1373 return res;
1374 }
1375
1376
1377
1378 /**
1379 * Generate code to do cube face selection and compute per-face texcoords.
1380 */
1381 void
1382 lp_build_cube_lookup(struct lp_build_sample_context *bld,
1383 LLVMValueRef *coords,
1384 const struct lp_derivatives *derivs, /* optional */
1385 LLVMValueRef *rho,
1386 boolean need_derivs)
1387 {
1388 struct lp_build_context *coord_bld = &bld->coord_bld;
1389 LLVMBuilderRef builder = bld->gallivm->builder;
1390 struct gallivm_state *gallivm = bld->gallivm;
1391 LLVMValueRef si, ti, ri;
1392
1393 if (1 || coord_bld->type.length > 4) {
1394 /*
1395 * Do per-pixel face selection. We cannot however (as we used to do)
1396 * simply calculate the derivs afterwards (which is very bogus for
1397 * explicit derivs btw) because the values would be "random" when
1398 * not all pixels lie on the same face. So what we do here is just
1399 * calculate the derivatives after scaling the coords by the absolute
1400 * value of the inverse major axis, and essentially do rho calculation
1401 * steps as if it were a 3d texture. This is perfect if all pixels hit
1402 * the same face, but not so great at edges; I believe the max error
1403 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1404 * the 3d distance between 2 points on the cube instead of measuring up/down
1405 * the edge). Still this is possibly a win over just selecting the same face
1406 * for all pixels. Unfortunately, something like that doesn't work for
1407 * explicit derivatives.
1408 * TODO: handle explicit derivatives by transforming them alongside coords
1409 * somehow.
1410 */
1411 struct lp_build_context *cint_bld = &bld->int_coord_bld;
1412 struct lp_type intctype = cint_bld->type;
1413 LLVMValueRef signs, signt, signr, signma;
1414 LLVMValueRef as, at, ar, face, face_s, face_t;
1415 LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
1416 LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1417 LLVMValueRef tnegi, rnegi;
1418 LLVMValueRef ma, mai, ima;
1419 LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1420 LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1421 1 << (intctype.width - 1));
1422 LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1423 intctype.width -1);
1424 LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1425 LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1426 LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1427 LLVMValueRef s = coords[0];
1428 LLVMValueRef t = coords[1];
1429 LLVMValueRef r = coords[2];
1430
1431 assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1432 assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1433 assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1434
1435 /*
1436 * get absolute value (for x/y/z face selection) and sign bit
1437 * (for mirroring minor coords and pos/neg face selection)
1438 * of the original coords.
1439 */
1440 as = lp_build_abs(&bld->coord_bld, s);
1441 at = lp_build_abs(&bld->coord_bld, t);
1442 ar = lp_build_abs(&bld->coord_bld, r);
1443
1444 /*
1445 * major face determination: select x if x > y else select y
1446 * select z if z >= max(x,y) else select previous result
1447 * if some axes are the same we choose z over y and y over x - the
1448 * dx10 spec seems to ask for this while OpenGL doesn't care (if we
1449 * didn't care we could save a select or two if using different
1450 * compares and doing at_g_as_ar last since tnewx and tnewz are the
1451 * same).
1452 */
1453 as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
1454 maxasat = lp_build_max(coord_bld, as, at);
1455 ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
1456
1457 if (need_derivs) {
1458 LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
1459 static const unsigned char swizzle0[] = { /* no-op swizzle */
1460 0, LP_BLD_SWIZZLE_DONTCARE,
1461 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1462 };
1463 static const unsigned char swizzle1[] = {
1464 1, LP_BLD_SWIZZLE_DONTCARE,
1465 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1466 };
1467 static const unsigned char swizzle01[] = { /* no-op swizzle */
1468 0, 1,
1469 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1470 };
1471 static const unsigned char swizzle23[] = {
1472 2, 3,
1473 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1474 };
1475 static const unsigned char swizzle02[] = {
1476 0, 2,
1477 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1478 };
1479
1480 /*
1481 * scale the s/t/r coords pre-select/mirror so we can calculate
1482 * "reasonable" derivs.
1483 */
1484 ma = lp_build_select(coord_bld, as_ge_at, s, t);
1485 ma = lp_build_select(coord_bld, ar_ge_as_at, r, ma);
1486 ima = lp_build_cube_imapos(coord_bld, ma);
1487 s = lp_build_mul(coord_bld, s, ima);
1488 t = lp_build_mul(coord_bld, t, ima);
1489 r = lp_build_mul(coord_bld, r, ima);
1490
1491 /*
1492 * This isn't quite the same as the "ordinary" (3d deriv) path since we
1493 * know the texture is square, which simplifies things (we can completely omit
1494 * the size mul, which normally happens very early, and do it at the
1495 * very end).
1496 */
1497 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
1498 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
1499
1500 if (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) {
1501 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
1502 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
1503 }
1504 else {
1505 ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
1506 ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
1507 }
1508
1509 tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
1510 tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
1511 tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
1512
1513 if (gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) {
1514 rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
1515 rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);
1516 }
1517 else {
1518 rho_vec = lp_build_max(coord_bld, tmp[0], tmp[1]);
1519 rho_vec = lp_build_max(coord_bld, rho_vec, tmp[2]);
1520 }
1521
1522 tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
1523 tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
1524 *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
1525 }
1526
1527 si = LLVMBuildBitCast(builder, s, lp_build_vec_type(gallivm, intctype), "");
1528 ti = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
1529 ri = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
1530 signs = LLVMBuildAnd(builder, si, signmask, "");
1531 signt = LLVMBuildAnd(builder, ti, signmask, "");
1532 signr = LLVMBuildAnd(builder, ri, signmask, "");
1533
1534 /*
1535 * compute all possible new s/t coords
1536 * snewx = signs * -r;
1537 * tnewx = -t;
1538 * snewy = s;
1539 * tnewy = signt * r;
1540 * snewz = signr * s;
1541 * tnewz = -t;
1542 */
1543 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1544 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1545
1546 snewx = LLVMBuildXor(builder, signs, rnegi, "");
1547 tnewx = tnegi;
1548
1549 snewy = si;
1550 tnewy = LLVMBuildXor(builder, signt, ri, "");
1551
1552 snewz = LLVMBuildXor(builder, signr, si, "");
1553 tnewz = tnegi;
1554
1555 /* XXX on x86 unclear if we should cast the values back to float
1556 * or not - on some cpus (nehalem) pblendvb has twice the throughput
1557 * of blendvps though on others there just might be domain
1558 * transition penalties when using it (this depends on what llvm
1559 * will choose for the bit ops above so there appears to be no "right way",
1560 * but given the boatload of selects let's just use the int type).
1561 */
1562
1563 /* select/mirror */
1564 if (!need_derivs) {
1565 ma = lp_build_select(coord_bld, as_ge_at, s, t);
1566 }
1567 face_s = lp_build_select(cint_bld, as_ge_at, snewx, snewy);
1568 face_t = lp_build_select(cint_bld, as_ge_at, tnewx, tnewy);
1569 face = lp_build_select(cint_bld, as_ge_at, facex, facey);
1570
1571 if (!need_derivs) {
1572 ma = lp_build_select(coord_bld, ar_ge_as_at, r, ma);
1573 }
1574 face_s = lp_build_select(cint_bld, ar_ge_as_at, snewz, face_s);
1575 face_t = lp_build_select(cint_bld, ar_ge_as_at, tnewz, face_t);
1576 face = lp_build_select(cint_bld, ar_ge_as_at, facez, face);
1577
1578 face_s = LLVMBuildBitCast(builder, face_s,
1579 lp_build_vec_type(gallivm, coord_bld->type), "");
1580 face_t = LLVMBuildBitCast(builder, face_t,
1581 lp_build_vec_type(gallivm, coord_bld->type), "");
1582
1583 /* add +1 for neg face */
1584 /* XXX with AVX probably want to use another select here -
1585 * as long as we ensure vblendvps gets used we can actually
1586 * skip the comparison and just use sign as a "mask" directly.
1587 */
1588 mai = LLVMBuildBitCast(builder, ma, lp_build_vec_type(gallivm, intctype), "");
1589 signma = LLVMBuildLShr(builder, mai, signshift, "");
1590 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1591
1592 /* project coords */
1593 if (!need_derivs) {
1594 ima = lp_build_cube_imapos(coord_bld, ma);
1595 face_s = lp_build_mul(coord_bld, face_s, ima);
1596 face_t = lp_build_mul(coord_bld, face_t, ima);
1597 }
1598
1599 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
1600 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
1601 }
1602
1603 else {
1604 struct lp_build_if_state if_ctx;
1605 LLVMValueRef face_s_var;
1606 LLVMValueRef face_t_var;
1607 LLVMValueRef face_var;
1608 LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
1609 LLVMValueRef shuffles[4];
1610 LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
1611 LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
1612 LLVMValueRef tmp[4], rxyz, arxyz;
1613 struct lp_build_context *float_bld = &bld->float_bld;
1614 LLVMValueRef s, t, r, face, face_s, face_t;
1615
1616 assert(bld->coord_bld.type.length == 4);
1617
1618 tmp[0] = s = coords[0];
1619 tmp[1] = t = coords[1];
1620 tmp[2] = r = coords[2];
1621 rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
1622 arxyz = lp_build_abs(&bld->coord_bld, rxyz);
1623
1624 shuffles[0] = lp_build_const_int32(gallivm, 0);
1625 shuffles[1] = lp_build_const_int32(gallivm, 1);
1626 shuffles[2] = lp_build_const_int32(gallivm, 0);
1627 shuffles[3] = lp_build_const_int32(gallivm, 1);
1628 arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1629 shuffles[0] = lp_build_const_int32(gallivm, 1);
1630 shuffles[1] = lp_build_const_int32(gallivm, 0);
1631 shuffles[2] = lp_build_const_int32(gallivm, 2);
1632 shuffles[3] = lp_build_const_int32(gallivm, 2);
1633 aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1634 arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
1635
1636 shuffles[0] = lp_build_const_int32(gallivm, 0);
1637 shuffles[1] = lp_build_const_int32(gallivm, 1);
1638 arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1639 LLVMConstVector(shuffles, 2), "");
1640 shuffles[0] = lp_build_const_int32(gallivm, 2);
1641 shuffles[1] = lp_build_const_int32(gallivm, 3);
1642 arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1643 LLVMConstVector(shuffles, 2), "");
1644 arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
1645
1646 arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1647 lp_build_const_int32(gallivm, 0), "");
1648 arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
1649 lp_build_const_int32(gallivm, 0), "");
1650 ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1651 lp_build_const_int32(gallivm, 1), "");
1652 ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
1653 lp_build_const_int32(gallivm, 0), "");
1654 face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
1655 face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
1656 face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
1657
1658 lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
1659 {
1660 /* +/- X face */
1661 LLVMValueRef sign, ima;
1662 si = LLVMBuildExtractElement(builder, rxyz,
1663 lp_build_const_int32(gallivm, 0), "");
1665 sign = lp_build_sgn(float_bld, si);
1666 ima = lp_build_cube_imaneg(coord_bld, s);
1667 face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
1668 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1669 face = lp_build_cube_face(bld, si,
1670 PIPE_TEX_FACE_POS_X,
1671 PIPE_TEX_FACE_NEG_X);
1672 LLVMBuildStore(builder, face_s, face_s_var);
1673 LLVMBuildStore(builder, face_t, face_t_var);
1674 LLVMBuildStore(builder, face, face_var);
1675 }
1676 lp_build_else(&if_ctx);
1677 {
1678 struct lp_build_if_state if_ctx2;
1679
1680 lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
1681 {
1682 LLVMValueRef sign, ima;
1683 /* +/- Y face */
1684 ti = LLVMBuildExtractElement(builder, rxyz,
1685 lp_build_const_int32(gallivm, 1), "");
1686 sign = lp_build_sgn(float_bld, ti);
1687 ima = lp_build_cube_imaneg(coord_bld, t);
1688 face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
1689 face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
1690 face = lp_build_cube_face(bld, ti,
1691 PIPE_TEX_FACE_POS_Y,
1692 PIPE_TEX_FACE_NEG_Y);
1693 LLVMBuildStore(builder, face_s, face_s_var);
1694 LLVMBuildStore(builder, face_t, face_t_var);
1695 LLVMBuildStore(builder, face, face_var);
1696 }
1697 lp_build_else(&if_ctx2);
1698 {
1699 /* +/- Z face */
1700 LLVMValueRef sign, ima;
1701 ri = LLVMBuildExtractElement(builder, rxyz,
1702 lp_build_const_int32(gallivm, 2), "");
1703 sign = lp_build_sgn(float_bld, ri);
1704 ima = lp_build_cube_imaneg(coord_bld, r);
1705 face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
1706 face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1707 face = lp_build_cube_face(bld, ri,
1708 PIPE_TEX_FACE_POS_Z,
1709 PIPE_TEX_FACE_NEG_Z);
1710 LLVMBuildStore(builder, face_s, face_s_var);
1711 LLVMBuildStore(builder, face_t, face_t_var);
1712 LLVMBuildStore(builder, face, face_var);
1713 }
1714 lp_build_endif(&if_ctx2);
1715 }
1716
1717 lp_build_endif(&if_ctx);
1718
1719 coords[0] = LLVMBuildLoad(builder, face_s_var, "face_s");
1720 coords[1] = LLVMBuildLoad(builder, face_t_var, "face_t");
1721 face = LLVMBuildLoad(builder, face_var, "face");
1722 coords[2] = lp_build_broadcast_scalar(&bld->int_coord_bld, face);
1723 }
1724 }
1725
1726
1727 /**
1728 * Compute the partial offset of a pixel block along an arbitrary axis.
1729 *
1730 * @param coord coordinate in pixels
1731 * @param stride number of bytes between rows of successive pixel blocks
1732 * @param block_length number of pixels in a pixel block along the coordinate
1733 * axis
1734 * @param out_offset resulting relative offset of the pixel block in bytes
1735 * @param out_subcoord resulting sub-block pixel coordinate
1736 */
1737 void
1738 lp_build_sample_partial_offset(struct lp_build_context *bld,
1739 unsigned block_length,
1740 LLVMValueRef coord,
1741 LLVMValueRef stride,
1742 LLVMValueRef *out_offset,
1743 LLVMValueRef *out_subcoord)
1744 {
1745 LLVMBuilderRef builder = bld->gallivm->builder;
1746 LLVMValueRef offset;
1747 LLVMValueRef subcoord;
1748
1749 if (block_length == 1) {
1750 subcoord = bld->zero;
1751 }
1752 else {
1753 /*
1754 * Pixel blocks have power of two dimensions. LLVM should convert the
1755 * rem/div to bit arithmetic.
1756 * TODO: Verify this.
1757 * It does indeed BUT it does transform it to scalar (and back) when doing so
1758 * (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
1759 * The generated code looks seriously unfunny and is quite expensive.
1760 */
1761 #if 0
1762 LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
1763 subcoord = LLVMBuildURem(builder, coord, block_width, "");
1764 coord = LLVMBuildUDiv(builder, coord, block_width, "");
1765 #else
1766 unsigned logbase2 = util_logbase2(block_length);
1767 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
1768 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
1769 subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
1770 coord = LLVMBuildLShr(builder, coord, block_shift, "");
1771 #endif
1772 }
1773
1774 offset = lp_build_mul(bld, coord, stride);
1775
1776 assert(out_offset);
1777 assert(out_subcoord);
1778
1779 *out_offset = offset;
1780 *out_subcoord = subcoord;
1781 }
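
/*
 * Worked example: for a format with 4-pixel-wide blocks (e.g. DXT1) and
 * coord = 13, block_shift = 2 and block_mask = 3, so:
 *
 *    subcoord = 13 & 3             = 1
 *    offset   = (13 >> 2) * stride = 3 * stride
 */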
1782
1783
1784 /**
1785 * Compute the offset of a pixel block.
1786 *
1787 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
1788 *
1789 * Returns the relative offset and i,j sub-block coordinates
1790 */
1791 void
1792 lp_build_sample_offset(struct lp_build_context *bld,
1793 const struct util_format_description *format_desc,
1794 LLVMValueRef x,
1795 LLVMValueRef y,
1796 LLVMValueRef z,
1797 LLVMValueRef y_stride,
1798 LLVMValueRef z_stride,
1799 LLVMValueRef *out_offset,
1800 LLVMValueRef *out_i,
1801 LLVMValueRef *out_j)
1802 {
1803 LLVMValueRef x_stride;
1804 LLVMValueRef offset;
1805
1806 x_stride = lp_build_const_vec(bld->gallivm, bld->type,
1807 format_desc->block.bits/8);
1808
1809 lp_build_sample_partial_offset(bld,
1810 format_desc->block.width,
1811 x, x_stride,
1812 &offset, out_i);
1813
1814 if (y && y_stride) {
1815 LLVMValueRef y_offset;
1816 lp_build_sample_partial_offset(bld,
1817 format_desc->block.height,
1818 y, y_stride,
1819 &y_offset, out_j);
1820 offset = lp_build_add(bld, offset, y_offset);
1821 }
1822 else {
1823 *out_j = bld->zero;
1824 }
1825
1826 if (z && z_stride) {
1827 LLVMValueRef z_offset;
1828 LLVMValueRef k;
1829 lp_build_sample_partial_offset(bld,
1830 1, /* pixel blocks are always 2D */
1831 z, z_stride,
1832 &z_offset, &k);
1833 offset = lp_build_add(bld, offset, z_offset);
1834 }
1835
1836 *out_offset = offset;
1837 }
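
/*
 * In other words, for a linear layout the offset computed above is:
 *
 *    offset = (x / block_width)  * block_bytes
 *           + (y / block_height) * y_stride
 *           + z                  * z_stride
 *
 * with out_i = x % block_width and out_j = y % block_height.
 */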