1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- common code.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35 #include "pipe/p_defines.h"
36 #include "pipe/p_state.h"
37 #include "util/format/u_format.h"
38 #include "util/u_math.h"
39 #include "util/u_cpu_detect.h"
40 #include "lp_bld_arit.h"
41 #include "lp_bld_const.h"
42 #include "lp_bld_debug.h"
43 #include "lp_bld_printf.h"
44 #include "lp_bld_flow.h"
45 #include "lp_bld_sample.h"
46 #include "lp_bld_swizzle.h"
47 #include "lp_bld_type.h"
48 #include "lp_bld_logic.h"
49 #include "lp_bld_pack.h"
50 #include "lp_bld_quad.h"
51 #include "lp_bld_bitarit.h"
52
53
54 /*
55 * Bri-linear factor. Should be greater than one.
56 */
57 #define BRILINEAR_FACTOR 2
58
59 /**
60 * Does the given texture wrap mode allow sampling the texture border color?
61 * XXX maybe move this into gallium util code.
62 */
63 boolean
64 lp_sampler_wrap_mode_uses_border_color(unsigned mode,
65 unsigned min_img_filter,
66 unsigned mag_img_filter)
67 {
68 switch (mode) {
69 case PIPE_TEX_WRAP_REPEAT:
70 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
71 case PIPE_TEX_WRAP_MIRROR_REPEAT:
72 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
73 return FALSE;
74 case PIPE_TEX_WRAP_CLAMP:
75 case PIPE_TEX_WRAP_MIRROR_CLAMP:
76 if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
77 mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
78 return FALSE;
79 } else {
80 return TRUE;
81 }
82 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
83 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
84 return TRUE;
85 default:
86 assert(0 && "unexpected wrap mode");
87 return FALSE;
88 }
89 }
90
91
92 /**
93 * Initialize lp_sampler_static_texture_state object with the gallium
94 * texture/sampler_view state (this contains the parts which are
95 * considered static).
96 */
97 void
98 lp_sampler_static_texture_state(struct lp_static_texture_state *state,
99 const struct pipe_sampler_view *view)
100 {
101 const struct pipe_resource *texture;
102
103 memset(state, 0, sizeof *state);
104
105 if (!view || !view->texture)
106 return;
107
108 texture = view->texture;
109
110 state->format = view->format;
111 state->swizzle_r = view->swizzle_r;
112 state->swizzle_g = view->swizzle_g;
113 state->swizzle_b = view->swizzle_b;
114 state->swizzle_a = view->swizzle_a;
115
116 state->target = view->target;
117 state->pot_width = util_is_power_of_two_or_zero(texture->width0);
118 state->pot_height = util_is_power_of_two_or_zero(texture->height0);
119 state->pot_depth = util_is_power_of_two_or_zero(texture->depth0);
120 state->level_zero_only = !view->u.tex.last_level;
121
122 /*
123 * the layer / element / level parameters are all either dynamic
124 * state or handled transparently wrt execution.
125 */
126 }
127
128 /**
129 * Initialize lp_sampler_static_texture_state object with the gallium
130 * image view state (this contains the parts which are
131 * considered static).
132 */
133 void
134 lp_sampler_static_texture_state_image(struct lp_static_texture_state *state,
135 const struct pipe_image_view *view)
136 {
137 const struct pipe_resource *resource;
138
139 memset(state, 0, sizeof *state);
140
141 if (!view || !view->resource)
142 return;
143
144 resource = view->resource;
145
146 state->format = view->format;
147 state->swizzle_r = PIPE_SWIZZLE_X;
148 state->swizzle_g = PIPE_SWIZZLE_Y;
149 state->swizzle_b = PIPE_SWIZZLE_Z;
150 state->swizzle_a = PIPE_SWIZZLE_W;
151
152 state->target = view->resource->target;
153 state->pot_width = util_is_power_of_two_or_zero(resource->width0);
154 state->pot_height = util_is_power_of_two_or_zero(resource->height0);
155 state->pot_depth = util_is_power_of_two_or_zero(resource->depth0);
156 state->level_zero_only = 0;
157
158 /*
159 * the layer / element / level parameters are all either dynamic
160 * state or handled transparently wrt execution.
161 */
162 }
163
164 /**
165 * Initialize lp_sampler_static_sampler_state object with the gallium sampler
166 * state (this contains the parts which are considered static).
167 */
168 void
169 lp_sampler_static_sampler_state(struct lp_static_sampler_state *state,
170 const struct pipe_sampler_state *sampler)
171 {
172 memset(state, 0, sizeof *state);
173
174 if (!sampler)
175 return;
176
177 /*
178 * We don't copy sampler state over unless it is actually enabled, to avoid
179 * spurious recompiles, as the sampler static state is part of the shader
180 * key.
181 *
182 * Ideally the state tracker or cso_cache module would make all state
183 * canonical, but until that happens it's better to be safe than sorry here.
184 *
185 * XXX: Actually there's much more that could be done here, especially
186 * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
187 */
188
189 state->wrap_s = sampler->wrap_s;
190 state->wrap_t = sampler->wrap_t;
191 state->wrap_r = sampler->wrap_r;
192 state->min_img_filter = sampler->min_img_filter;
193 state->mag_img_filter = sampler->mag_img_filter;
194 state->min_mip_filter = sampler->min_mip_filter;
195 state->seamless_cube_map = sampler->seamless_cube_map;
196
197 if (sampler->max_lod > 0.0f) {
198 state->max_lod_pos = 1;
199 }
200
201 if (sampler->lod_bias != 0.0f) {
202 state->lod_bias_non_zero = 1;
203 }
204
205 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE ||
206 state->min_img_filter != state->mag_img_filter) {
207
208 /* If min_lod == max_lod we can greatly simplify mipmap selection.
209 * This is a case that occurs during automatic mipmap generation.
210 */
211 if (sampler->min_lod == sampler->max_lod) {
212 state->min_max_lod_equal = 1;
213 } else {
214 if (sampler->min_lod > 0.0f) {
215 state->apply_min_lod = 1;
216 }
217
218 /*
219 * XXX this won't do anything with the mesa state tracker which always
220 * sets max_lod to no more than the number of actually present mip levels...
221 */
222 if (sampler->max_lod < (PIPE_MAX_TEXTURE_LEVELS - 1)) {
223 state->apply_max_lod = 1;
224 }
225 }
226 }
227
228 state->compare_mode = sampler->compare_mode;
229 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
230 state->compare_func = sampler->compare_func;
231 }
232
233 state->normalized_coords = sampler->normalized_coords;
234 }
235
236
237 /**
238 * Generate code to compute coordinate gradient (rho).
239 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
240 *
241 * The resulting rho has bld->levelf format (per quad or per element).
242 */
243 static LLVMValueRef
244 lp_build_rho(struct lp_build_sample_context *bld,
245 unsigned texture_unit,
246 LLVMValueRef s,
247 LLVMValueRef t,
248 LLVMValueRef r,
249 LLVMValueRef cube_rho,
250 const struct lp_derivatives *derivs)
251 {
252 struct gallivm_state *gallivm = bld->gallivm;
253 struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
254 struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
255 struct lp_build_context *float_bld = &bld->float_bld;
256 struct lp_build_context *coord_bld = &bld->coord_bld;
257 struct lp_build_context *rho_bld = &bld->lodf_bld;
258 const unsigned dims = bld->dims;
259 LLVMValueRef ddx_ddy[2] = {NULL};
260 LLVMBuilderRef builder = bld->gallivm->builder;
261 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
262 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
263 LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
264 LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
265 LLVMValueRef rho_vec;
266 LLVMValueRef int_size, float_size;
267 LLVMValueRef rho;
268 LLVMValueRef first_level, first_level_vec;
269 unsigned length = coord_bld->type.length;
270 unsigned num_quads = length / 4;
271 boolean rho_per_quad = rho_bld->type.length != length;
272 boolean no_rho_opt = bld->no_rho_approx && (dims > 1);
273 unsigned i;
274 LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
275 LLVMValueRef rho_xvec, rho_yvec;
276
277 /* Note that all simplified calculations will only work for isotropic filtering */
278
279 /*
280 * rho calcs are always per quad, except when explicit derivs are given
281 * (excluding the messy cube maps for now) and per-element lod is requested.
282 */
283
284 first_level = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm,
285 bld->context_ptr, texture_unit);
286 first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level);
287 int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec, TRUE);
288 float_size = lp_build_int_to_float(float_size_bld, int_size);
289
290 if (cube_rho) {
291 LLVMValueRef cubesize;
292 LLVMValueRef index0 = lp_build_const_int32(gallivm, 0);
293
294 /*
295 * Cube map code already did everything except the size mul and per-quad extraction.
296 * Luckily cube map faces are always square!
297 */
298 if (rho_per_quad) {
299 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
300 rho_bld->type, cube_rho, 0);
301 }
302 else {
303 rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4);
304 }
305 /* Could optimize this for the single-quad case by just skipping the broadcast */
306 cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
307 rho_bld->type, float_size, index0);
308 /* skipping sqrt hence returning rho squared */
309 cubesize = lp_build_mul(rho_bld, cubesize, cubesize);
310 rho = lp_build_mul(rho_bld, cubesize, rho);
311 }
312 else if (derivs) {
313 LLVMValueRef ddmax[3] = { NULL }, ddx[3] = { NULL }, ddy[3] = { NULL };
314 for (i = 0; i < dims; i++) {
315 LLVMValueRef floatdim;
316 LLVMValueRef indexi = lp_build_const_int32(gallivm, i);
317
318 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
319 coord_bld->type, float_size, indexi);
320
321 /*
322 * note that for the rho_per_quad case we could reduce the math (at some shuffle
323 * cost), but for now use the same code as the per-pixel lod case.
324 */
325 if (no_rho_opt) {
326 ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]);
327 ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]);
328 ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]);
329 ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]);
330 }
331 else {
332 LLVMValueRef tmpx, tmpy;
333 tmpx = lp_build_abs(coord_bld, derivs->ddx[i]);
334 tmpy = lp_build_abs(coord_bld, derivs->ddy[i]);
335 ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy);
336 ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]);
337 }
338 }
339 if (no_rho_opt) {
340 rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]);
341 rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]);
342 if (dims > 2) {
343 rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]);
344 rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]);
345 }
346 rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
347 /* skipping sqrt hence returning rho squared */
348 }
349 else {
350 rho = ddmax[0];
351 if (dims > 1) {
352 rho = lp_build_max(coord_bld, rho, ddmax[1]);
353 if (dims > 2) {
354 rho = lp_build_max(coord_bld, rho, ddmax[2]);
355 }
356 }
357 }
358 if (rho_per_quad) {
359 /*
360 * rho_vec contains per-pixel rho, convert to scalar per quad.
361 */
362 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
363 rho_bld->type, rho, 0);
364 }
365 }
366 else {
367 /*
368 * This all looks a bit complex, but it's not that bad
369 * (the shuffle code makes it look worse than it is).
370 * Still, might not be ideal for all cases.
371 */
372 static const unsigned char swizzle0[] = { /* no-op swizzle */
373 0, LP_BLD_SWIZZLE_DONTCARE,
374 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
375 };
376 static const unsigned char swizzle1[] = {
377 1, LP_BLD_SWIZZLE_DONTCARE,
378 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
379 };
380 static const unsigned char swizzle2[] = {
381 2, LP_BLD_SWIZZLE_DONTCARE,
382 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
383 };
384
385 if (dims < 2) {
386 ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s);
387 }
388 else if (dims >= 2) {
389 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
390 if (dims > 2) {
391 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
392 }
393 }
394
395 if (no_rho_opt) {
396 static const unsigned char swizzle01[] = { /* no-op swizzle */
397 0, 1,
398 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
399 };
400 static const unsigned char swizzle23[] = {
401 2, 3,
402 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
403 };
404 LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4];
405
406 for (i = 0; i < num_quads; i++) {
407 shuffles[i*4+0] = shuffles[i*4+1] = index0;
408 shuffles[i*4+2] = shuffles[i*4+3] = index1;
409 }
410 floatdim = LLVMBuildShuffleVector(builder, float_size, float_size,
411 LLVMConstVector(shuffles, length), "");
412 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim);
413 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
414 ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
415 ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
416 rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt);
417
418 if (dims > 2) {
419 static const unsigned char swizzle02[] = {
420 0, 2,
421 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
422 };
423 floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type,
424 coord_bld->type, float_size, index2);
425 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim);
426 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
427 ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
428 rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]);
429 }
430
431 rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
432 rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
433 rho = lp_build_max(coord_bld, rho_xvec, rho_yvec);
434
435 if (rho_per_quad) {
436 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
437 rho_bld->type, rho, 0);
438 }
439 else {
440 rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
441 }
442 /* skipping sqrt hence returning rho squared */
443 }
444 else {
445 ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
446 if (dims > 2) {
447 ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
448 }
449 else {
450 ddx_ddy[1] = NULL; /* silence compiler warning */
451 }
452
453 if (dims < 2) {
454 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0);
455 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2);
456 }
457 else if (dims == 2) {
458 static const unsigned char swizzle02[] = {
459 0, 2,
460 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
461 };
462 static const unsigned char swizzle13[] = {
463 1, 3,
464 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
465 };
466 rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02);
467 rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13);
468 }
469 else {
470 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
471 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
472 assert(dims == 3);
473 for (i = 0; i < num_quads; i++) {
474 shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
475 shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
476 shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
477 shuffles1[4*i + 3] = i32undef;
478 shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
479 shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
480 shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2);
481 shuffles2[4*i + 3] = i32undef;
482 }
483 rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
484 LLVMConstVector(shuffles1, length), "");
485 rho_yvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1],
486 LLVMConstVector(shuffles2, length), "");
487 }
488
489 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
490
491 if (bld->coord_type.length > 4) {
492 /* expand size to each quad */
493 if (dims > 1) {
494 /* could use some broadcast_vector helper for this? */
495 LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
496 for (i = 0; i < num_quads; i++) {
497 src[i] = float_size;
498 }
499 float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
500 }
501 else {
502 float_size = lp_build_broadcast_scalar(coord_bld, float_size);
503 }
504 rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);
505
506 if (dims <= 1) {
507 rho = rho_vec;
508 }
509 else {
510 if (dims >= 2) {
511 LLVMValueRef rho_s, rho_t, rho_r;
512
513 rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
514 rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
515
516 rho = lp_build_max(coord_bld, rho_s, rho_t);
517
518 if (dims >= 3) {
519 rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
520 rho = lp_build_max(coord_bld, rho, rho_r);
521 }
522 }
523 }
524 if (rho_per_quad) {
525 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
526 rho_bld->type, rho, 0);
527 }
528 else {
529 rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4);
530 }
531 }
532 else {
533 if (dims <= 1) {
534 rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
535 }
536 rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);
537
538 if (dims <= 1) {
539 rho = rho_vec;
540 }
541 else {
542 if (dims >= 2) {
543 LLVMValueRef rho_s, rho_t, rho_r;
544
545 rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
546 rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");
547
548 rho = lp_build_max(float_bld, rho_s, rho_t);
549
550 if (dims >= 3) {
551 rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
552 rho = lp_build_max(float_bld, rho, rho_r);
553 }
554 }
555 }
556 if (!rho_per_quad) {
557 rho = lp_build_broadcast_scalar(rho_bld, rho);
558 }
559 }
560 }
561 }
562
563 return rho;
564 }
565
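/*
 * Editor's note: a rough scalar sketch (illustrative only, not part of the
 * build) of what lp_build_rho() above computes for the explicit-derivative,
 * 2D case; 'width'/'height' stand for the first_level-minified size and
 * rho_ref_2d is a hypothetical helper name:
 *
 *    static float
 *    rho_ref_2d(float dsdx, float dtdx, float dsdy, float dtdy,
 *               float width, float height, boolean no_rho_approx)
 *    {
 *       if (no_rho_approx) {
 *          // max of squared gradient lengths; the caller later halves the log2
 *          float rx = dsdx*dsdx*width*width + dtdx*dtdx*height*height;
 *          float ry = dsdy*dsdy*width*width + dtdy*dtdy*height*height;
 *          return MAX2(rx, ry);               // rho squared
 *       } else {
 *          float rs = MAX2(fabsf(dsdx), fabsf(dsdy)) * width;
 *          float rt = MAX2(fabsf(dtdx), fabsf(dtdy)) * height;
 *          return MAX2(rs, rt);               // rho
 *       }
 *    }
 */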
566
567 /*
568 * Bri-linear lod computation
569 *
570 * Use a piece-wise linear approximation of log2 such that:
571 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
572 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
573 * with the steepness specified in 'factor'
574 * - exact result for 0.5, 1.5, etc.
575 *
576 *
577 * 1.0 - /----*
578 * /
579 * /
580 * /
581 * 0.5 - *
582 * /
583 * /
584 * /
585 * 0.0 - *----/
586 *
587 * | |
588 * 2^0 2^1
589 *
590 * This is a technique also commonly used in hardware:
591 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
592 *
593 * TODO: For correctness, this should only be applied when texture is known to
594 * have regular mipmaps, i.e., mipmaps derived from the base level.
595 *
596 * TODO: This could be done in fixed point, where applicable.
597 */
598 static void
599 lp_build_brilinear_lod(struct lp_build_context *bld,
600 LLVMValueRef lod,
601 double factor,
602 LLVMValueRef *out_lod_ipart,
603 LLVMValueRef *out_lod_fpart)
604 {
605 LLVMValueRef lod_fpart;
606 double pre_offset = (factor - 0.5)/factor - 0.5;
607 double post_offset = 1 - factor;
608
609 if (0) {
610 lp_build_printf(bld->gallivm, "lod = %f\n", lod);
611 }
612
613 lod = lp_build_add(bld, lod,
614 lp_build_const_vec(bld->gallivm, bld->type, pre_offset));
615
616 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);
617
618 lod_fpart = lp_build_mad(bld, lod_fpart,
619 lp_build_const_vec(bld->gallivm, bld->type, factor),
620 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
621
622 /*
623 * It's not necessary to clamp lod_fpart since:
624 * - the above expression will never produce numbers greater than one.
625 * - the mip filtering branch is only taken if lod_fpart is positive
626 */
627
628 *out_lod_fpart = lod_fpart;
629
630 if (0) {
631 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
632 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
633 }
634 }
635
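/*
 * Editor's note: a scalar sketch (illustrative only) of the piece-wise linear
 * split performed by lp_build_brilinear_lod() above, using the same offsets:
 *
 *    double pre_offset  = (factor - 0.5)/factor - 0.5;
 *    double post_offset = 1 - factor;
 *    double l           = lod + pre_offset;
 *    int    lod_ipart   = (int)floor(l);                          // ifloor
 *    double lod_fpart   = (l - floor(l)) * factor + post_offset;  // mad
 *
 * E.g. with factor == 2, lod == 0.5 yields ipart 0 and fpart 0.5 (exact),
 * while lod == 0.1 yields a negative fpart, so the mip filtering branch is
 * skipped and the result effectively rounds to level 0.
 */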
636
637 /*
638 * Combined log2 and brilinear lod computation.
639 *
640 * It's essentially identical to calling lp_build_fast_log2() and
641 * lp_build_brilinear_lod() above, but by combining we can compute the integer
642 * and fractional part independently.
643 */
644 static void
645 lp_build_brilinear_rho(struct lp_build_context *bld,
646 LLVMValueRef rho,
647 double factor,
648 LLVMValueRef *out_lod_ipart,
649 LLVMValueRef *out_lod_fpart)
650 {
651 LLVMValueRef lod_ipart;
652 LLVMValueRef lod_fpart;
653
654 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
655 const double post_offset = 1 - 2*factor;
656
657 assert(bld->type.floating);
658
659 assert(lp_check_value(bld->type, rho));
660
661 /*
662 * The pre factor will make the intersections with the exact powers of two
663 * happen precisely where we want them to be, which means that the integer
664 * part will not need any post adjustments.
665 */
666 rho = lp_build_mul(bld, rho,
667 lp_build_const_vec(bld->gallivm, bld->type, pre_factor));
668
669 /* ipart = ifloor(log2(rho)) */
670 lod_ipart = lp_build_extract_exponent(bld, rho, 0);
671
672 /* fpart = rho / 2**ipart */
673 lod_fpart = lp_build_extract_mantissa(bld, rho);
674
675 lod_fpart = lp_build_mad(bld, lod_fpart,
676 lp_build_const_vec(bld->gallivm, bld->type, factor),
677 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
678
679 /*
680 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
681 * - the above expression will never produce numbers greater than one.
682 * - the mip filtering branch is only taken if lod_fpart is positive
683 */
684
685 *out_lod_ipart = lod_ipart;
686 *out_lod_fpart = lod_fpart;
687 }
688
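/*
 * Editor's note: an equivalent scalar formulation (illustrative only) of
 * lp_build_brilinear_rho() above, using frexp() in place of the
 * exponent/mantissa extraction:
 *
 *    double pre_factor  = (2*factor - 0.5)/(M_SQRT2*factor);
 *    double post_offset = 1 - 2*factor;
 *    int e;
 *    double m = frexp(rho * pre_factor, &e);  // rho*pre_factor = m * 2^e, m in [0.5, 1)
 *    int    lod_ipart = e - 1;                // floor(log2(rho * pre_factor))
 *    double lod_fpart = (2.0 * m) * factor + post_offset;  // mantissa scaled to [1, 2)
 */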
689
690 /**
691 * Fast implementation of iround(log2(sqrt(x))), based on
692 * log2(x^n) == n*log2(x).
693 *
694 * Gives accurate results all the time.
695 * (Could be trivially extended to handle other power-of-two roots.)
696 */
697 static LLVMValueRef
698 lp_build_ilog2_sqrt(struct lp_build_context *bld,
699 LLVMValueRef x)
700 {
701 LLVMBuilderRef builder = bld->gallivm->builder;
702 LLVMValueRef ipart;
703 struct lp_type i_type = lp_int_type(bld->type);
704 LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1);
705
706 assert(bld->type.floating);
707
708 assert(lp_check_value(bld->type, x));
709
710 /* ipart = round(log2(sqrt(x))) = floor(0.5*(log2(x) + 1.0)) */
711 ipart = lp_build_extract_exponent(bld, x, 1);
712 ipart = LLVMBuildAShr(builder, ipart, one, "");
713
714 return ipart;
715 }
716
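/*
 * Editor's note: scalar equivalent (illustrative only) of the trick above:
 *
 *    int e;
 *    frexp(x, &e);        // x = m * 2^e with m in [0.5, 1), so e == floor(log2(x)) + 1
 *    int ipart = e >> 1;  // == iround(log2(sqrt(x))) for x > 0
 *
 * e.g. x == 16 gives e == 5 and ipart == 2 == log2(sqrt(16)).
 */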
717
718 /**
719 * Generate code to compute texture level of detail (lambda).
720 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
721 * \param lod_bias optional float vector with the shader lod bias
722 * \param explicit_lod optional float vector with the explicit lod
723 * \param cube_rho rho calculated by cube coord mapping (optional)
724 * \param out_lod_ipart integer part of lod
725 * \param out_lod_fpart float part of lod (never larger than 1 but may be negative)
726 * \param out_lod_positive (mask) if lod is positive (i.e. texture is minified)
727 *
728 * The resulting lod can be scalar per quad or be per element.
729 */
730 void
731 lp_build_lod_selector(struct lp_build_sample_context *bld,
732 boolean is_lodq,
733 unsigned texture_unit,
734 unsigned sampler_unit,
735 LLVMValueRef s,
736 LLVMValueRef t,
737 LLVMValueRef r,
738 LLVMValueRef cube_rho,
739 const struct lp_derivatives *derivs,
740 LLVMValueRef lod_bias, /* optional */
741 LLVMValueRef explicit_lod, /* optional */
742 unsigned mip_filter,
743 LLVMValueRef *out_lod,
744 LLVMValueRef *out_lod_ipart,
745 LLVMValueRef *out_lod_fpart,
746 LLVMValueRef *out_lod_positive)
747
748 {
749 LLVMBuilderRef builder = bld->gallivm->builder;
750 struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
751 struct lp_build_context *lodf_bld = &bld->lodf_bld;
752 LLVMValueRef lod;
753
754 *out_lod_ipart = bld->lodi_bld.zero;
755 *out_lod_positive = bld->lodi_bld.zero;
756 *out_lod_fpart = lodf_bld->zero;
757
758 /*
759 * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification:
760 * "Implementations may either unconditionally assume c = 0 for the minification
761 * vs. magnification switch-over point, or may choose to make c depend on the
762 * combination of minification and magnification modes as follows: if the
763 * magnification filter is given by LINEAR and the minification filter is given
764 * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is
765 * done to ensure that a minified texture does not appear "sharper" than a
766 * magnified texture. Otherwise c = 0."
767 * And 3.9.11 Texture Minification:
768 * "If lod is less than or equal to the constant c (see section 3.9.12) the
769 * texture is said to be magnified; if it is greater, the texture is minified."
770 * So, using 0 as switchover point always, and using magnification for lod == 0.
771 * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec),
772 * old GL versions required 0.5 for the modes listed above.
773 * I have no clue about the (undocumented) wishes of d3d9/d3d10 here!
774 */
775
776 if (bld->static_sampler_state->min_max_lod_equal && !is_lodq) {
777 /* User is forcing sampling from a particular mipmap level.
778 * This is hit during mipmap generation.
779 */
780 LLVMValueRef min_lod =
781 dynamic_state->min_lod(dynamic_state, bld->gallivm,
782 bld->context_ptr, sampler_unit);
783
784 lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
785 }
786 else {
787 if (explicit_lod) {
788 if (bld->num_lods != bld->coord_type.length)
789 lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
790 lodf_bld->type, explicit_lod, 0);
791 else
792 lod = explicit_lod;
793 }
794 else {
795 LLVMValueRef rho;
796 boolean rho_squared = (bld->no_rho_approx &&
797 (bld->dims > 1)) || cube_rho;
798
799 rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs);
800
801 /*
802 * Compute lod = log2(rho)
803 */
804
805 if (!lod_bias && !is_lodq &&
806 !bld->static_sampler_state->lod_bias_non_zero &&
807 !bld->static_sampler_state->apply_max_lod &&
808 !bld->static_sampler_state->apply_min_lod) {
809 /*
810 * Special case when there are no post-log2 adjustments, which
811 * saves instructions by keeping the integer and fractional lod
812 * computations separate from the start.
813 */
814
815 if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
816 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
817 /*
818 * We don't actually need both values all the time: lod_ipart is
819 * needed for the nearest mipfilter, lod_positive only if min != mag.
820 */
821 if (rho_squared) {
822 *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho);
823 }
824 else {
825 *out_lod_ipart = lp_build_ilog2(lodf_bld, rho);
826 }
827 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
828 rho, lodf_bld->one);
829 return;
830 }
831 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
832 !bld->no_brilinear && !rho_squared) {
833 /*
834 * This can't work if rho is squared. Not sure if it could be
835 * fixed while keeping it worthwhile; could also do sqrt here,
836 * but brilinear and no_rho_opt seem like a combination not
837 * making much sense anyway, so just use the ordinary path below.
838 */
839 lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR,
840 out_lod_ipart, out_lod_fpart);
841 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
842 rho, lodf_bld->one);
843 return;
844 }
845 }
846
847 if (0) {
848 lod = lp_build_log2(lodf_bld, rho);
849 }
850 else {
851 lod = lp_build_fast_log2(lodf_bld, rho);
852 }
853 if (rho_squared) {
854 /* log2(x) == 0.5*log2(x^2) */
855 lod = lp_build_mul(lodf_bld, lod,
856 lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F));
857 }
858
859 /* add shader lod bias */
860 if (lod_bias) {
861 if (bld->num_lods != bld->coord_type.length)
862 lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
863 lodf_bld->type, lod_bias, 0);
864 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
865 }
866 }
867
868 /* add sampler lod bias */
869 if (bld->static_sampler_state->lod_bias_non_zero) {
870 LLVMValueRef sampler_lod_bias =
871 dynamic_state->lod_bias(dynamic_state, bld->gallivm,
872 bld->context_ptr, sampler_unit);
873 sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld,
874 sampler_lod_bias);
875 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
876 }
877
878 if (is_lodq) {
879 *out_lod = lod;
880 }
881
882 /* clamp lod */
883 if (bld->static_sampler_state->apply_max_lod) {
884 LLVMValueRef max_lod =
885 dynamic_state->max_lod(dynamic_state, bld->gallivm,
886 bld->context_ptr, sampler_unit);
887 max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod);
888
889 lod = lp_build_min(lodf_bld, lod, max_lod);
890 }
891 if (bld->static_sampler_state->apply_min_lod) {
892 LLVMValueRef min_lod =
893 dynamic_state->min_lod(dynamic_state, bld->gallivm,
894 bld->context_ptr, sampler_unit);
895 min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod);
896
897 lod = lp_build_max(lodf_bld, lod, min_lod);
898 }
899
900 if (is_lodq) {
901 *out_lod_fpart = lod;
902 return;
903 }
904 }
905
906 *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER,
907 lod, lodf_bld->zero);
908
909 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
910 if (!bld->no_brilinear) {
911 lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR,
912 out_lod_ipart, out_lod_fpart);
913 }
914 else {
915 lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart);
916 }
917
918 lp_build_name(*out_lod_fpart, "lod_fpart");
919 }
920 else {
921 *out_lod_ipart = lp_build_iround(lodf_bld, lod);
922 }
923
924 lp_build_name(*out_lod_ipart, "lod_ipart");
925
926 return;
927 }
928
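/*
 * Editor's note: the computation above boils down to the following scalar
 * reference (illustrative only; rho may actually hold rho^2 when
 * no_rho_approx is in effect, hence the 0.5 factor):
 *
 *    float lod = log2f(rho);                  // or 0.5f * log2f(rho_squared)
 *    lod += shader_lod_bias + sampler_lod_bias;
 *    lod  = CLAMP(lod, min_lod, max_lod);     // only the clamps that are enabled
 *    lod_positive = lod > 0.0f;
 *    if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
 *       lod_ipart = (int)floorf(lod);         // or the brilinear split above
 *       lod_fpart = lod - floorf(lod);
 *    } else {
 *       lod_ipart = (int)roundf(lod);
 *    }
 */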
929
930 /**
931 * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod
932 * to actual mip level.
933 * Note: this is all scalar per quad code.
934 * \param lod_ipart int texture level of detail
935 * \param level_out returns integer
936 * \param out_of_bounds returns per coord out_of_bounds mask if provided
937 */
938 void
939 lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
940 unsigned texture_unit,
941 LLVMValueRef lod_ipart,
942 LLVMValueRef *level_out,
943 LLVMValueRef *out_of_bounds)
944 {
945 struct lp_build_context *leveli_bld = &bld->leveli_bld;
946 struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
947 LLVMValueRef first_level, last_level, level;
948
949 first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
950 bld->context_ptr, texture_unit);
951 last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
952 bld->context_ptr, texture_unit);
953 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
954 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
955
956 level = lp_build_add(leveli_bld, lod_ipart, first_level);
957
958 if (out_of_bounds) {
959 LLVMValueRef out, out1;
960 out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level);
961 out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level);
962 out = lp_build_or(leveli_bld, out, out1);
963 if (bld->num_mips == bld->coord_bld.type.length) {
964 *out_of_bounds = out;
965 }
966 else if (bld->num_mips == 1) {
967 *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out);
968 }
969 else {
970 assert(bld->num_mips == bld->coord_bld.type.length / 4);
971 *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm,
972 leveli_bld->type,
973 bld->int_coord_bld.type,
974 out);
975 }
976 level = lp_build_andnot(&bld->int_coord_bld, level, *out_of_bounds);
977 *level_out = level;
978 }
979 else {
980 /* clamp level to legal range of levels */
981 *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level);
982
983 }
984 }
985
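/*
 * Editor's note: per-element this reduces to (illustrative sketch only):
 *
 *    level = first_level + lod_ipart;
 *    out_of_bounds = level < first_level || level > last_level;
 *    level = out_of_bounds ? 0 : level;   // the andnot above zeroes rejected lanes
 *
 * or, when no out-of-bounds mask is requested, simply
 *
 *    level = CLAMP(level, first_level, last_level);
 */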
986
987 /**
988 * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s)
989 * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod
990 * part accordingly.
991 * Later, we'll sample from those two mipmap levels and interpolate between them.
992 */
993 void
994 lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
995 unsigned texture_unit,
996 LLVMValueRef lod_ipart,
997 LLVMValueRef *lod_fpart_inout,
998 LLVMValueRef *level0_out,
999 LLVMValueRef *level1_out)
1000 {
1001 LLVMBuilderRef builder = bld->gallivm->builder;
1002 struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state;
1003 struct lp_build_context *leveli_bld = &bld->leveli_bld;
1004 struct lp_build_context *levelf_bld = &bld->levelf_bld;
1005 LLVMValueRef first_level, last_level;
1006 LLVMValueRef clamp_min;
1007 LLVMValueRef clamp_max;
1008
1009 assert(bld->num_lods == bld->num_mips);
1010
1011 first_level = dynamic_state->first_level(dynamic_state, bld->gallivm,
1012 bld->context_ptr, texture_unit);
1013 last_level = dynamic_state->last_level(dynamic_state, bld->gallivm,
1014 bld->context_ptr, texture_unit);
1015 first_level = lp_build_broadcast_scalar(leveli_bld, first_level);
1016 last_level = lp_build_broadcast_scalar(leveli_bld, last_level);
1017
1018 *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level);
1019 *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one);
1020
1021 /*
1022 * Clamp both *level0_out and *level1_out to [first_level, last_level], with
1023 * the minimum number of comparisons, and zeroing lod_fpart in the extreme
1024 * ends in the process.
1025 */
1026
1027 /* *level0_out < first_level */
1028 clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
1029 *level0_out, first_level,
1030 "clamp_lod_to_first");
1031
1032 *level0_out = LLVMBuildSelect(builder, clamp_min,
1033 first_level, *level0_out, "");
1034
1035 *level1_out = LLVMBuildSelect(builder, clamp_min,
1036 first_level, *level1_out, "");
1037
1038 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
1039 levelf_bld->zero, *lod_fpart_inout, "");
1040
1041 /* *level0_out >= last_level */
1042 clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
1043 *level0_out, last_level,
1044 "clamp_lod_to_last");
1045
1046 *level0_out = LLVMBuildSelect(builder, clamp_max,
1047 last_level, *level0_out, "");
1048
1049 *level1_out = LLVMBuildSelect(builder, clamp_max,
1050 last_level, *level1_out, "");
1051
1052 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
1053 levelf_bld->zero, *lod_fpart_inout, "");
1054
1055 lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit);
1056 lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit);
1057 lp_build_name(*lod_fpart_inout, "texture%u_mipweight", texture_unit);
1058 }
1059
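/*
 * Editor's note: per-element the clamping above is equivalent to
 * (illustrative only):
 *
 *    level0 = first_level + lod_ipart;
 *    level1 = level0 + 1;
 *    if (level0 <  first_level) { level0 = level1 = first_level; lod_fpart = 0; }
 *    if (level0 >= last_level)  { level0 = level1 = last_level;  lod_fpart = 0; }
 *
 * i.e. two comparisons clamp both levels and zero the interpolation weight at
 * the extremes, so sampling degenerates to a single level there.
 */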
1060
1061 /**
1062 * Return pointer to a single mipmap level.
1063 * \param level integer mipmap level
1064 */
1065 LLVMValueRef
1066 lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
1067 LLVMValueRef level)
1068 {
1069 LLVMBuilderRef builder = bld->gallivm->builder;
1070 LLVMValueRef indexes[2], data_ptr, mip_offset;
1071
1072 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1073 indexes[1] = level;
1074 mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1075 mip_offset = LLVMBuildLoad(builder, mip_offset, "");
1076 data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
1077 return data_ptr;
1078 }
1079
1080 /**
1081 * Return (per-pixel) offsets to mip levels.
1082 * \param level integer mipmap level
1083 */
1084 LLVMValueRef
1085 lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
1086 LLVMValueRef level)
1087 {
1088 LLVMBuilderRef builder = bld->gallivm->builder;
1089 LLVMValueRef indexes[2], offsets, offset1;
1090
1091 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1092 if (bld->num_mips == 1) {
1093 indexes[1] = level;
1094 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1095 offset1 = LLVMBuildLoad(builder, offset1, "");
1096 offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
1097 }
1098 else if (bld->num_mips == bld->coord_bld.type.length / 4) {
1099 unsigned i;
1100
1101 offsets = bld->int_coord_bld.undef;
1102 for (i = 0; i < bld->num_mips; i++) {
1103 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1104 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
1105 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1106 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1107 offset1 = LLVMBuildLoad(builder, offset1, "");
1108 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
1109 }
1110 offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0, 4);
1111 }
1112 else {
1113 unsigned i;
1114
1115 assert (bld->num_mips == bld->coord_bld.type.length);
1116
1117 offsets = bld->int_coord_bld.undef;
1118 for (i = 0; i < bld->num_mips; i++) {
1119 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1120 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1121 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
1122 offset1 = LLVMBuildLoad(builder, offset1, "");
1123 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
1124 }
1125 }
1126 return offsets;
1127 }
1128
1129
1130 /**
1131 * Codegen equivalent for u_minify().
1132 * @param lod_scalar if lod is a (broadcasted) scalar
1133 * Return max(1, base_size >> level);
1134 */
1135 LLVMValueRef
1136 lp_build_minify(struct lp_build_context *bld,
1137 LLVMValueRef base_size,
1138 LLVMValueRef level,
1139 boolean lod_scalar)
1140 {
1141 LLVMBuilderRef builder = bld->gallivm->builder;
1142 assert(lp_check_value(bld->type, base_size));
1143 assert(lp_check_value(bld->type, level));
1144
1145 if (level == bld->zero) {
1146 /* if we're using mipmap level zero, no minification is needed */
1147 return base_size;
1148 }
1149 else {
1150 LLVMValueRef size;
1151 assert(bld->type.sign);
1152 if (lod_scalar ||
1153 (util_cpu_caps.has_avx2 || !util_cpu_caps.has_sse)) {
1154 size = LLVMBuildLShr(builder, base_size, level, "minify");
1155 size = lp_build_max(bld, size, bld->one);
1156 }
1157 else {
1158 /*
1159 * emulate shift with float mul, since intel "forgot" shifts with
1160 * per-element shift count until avx2, which results in terrible
1161 * scalar extraction (both count and value), scalar shift,
1162 * vector reinsertion. Should not be an issue on any non-x86 cpu
1163 * with a vector instruction set.
1164 * On cpus with AMD's XOP this should also be unnecessary but I'm
1165 * not sure if llvm would emit this with current flags.
1166 */
1167 LLVMValueRef const127, const23, lf;
1168 struct lp_type ftype;
1169 struct lp_build_context fbld;
1170 ftype = lp_type_float_vec(32, bld->type.length * bld->type.width);
1171 lp_build_context_init(&fbld, bld->gallivm, ftype);
1172 const127 = lp_build_const_int_vec(bld->gallivm, bld->type, 127);
1173 const23 = lp_build_const_int_vec(bld->gallivm, bld->type, 23);
1174
1175 /* calculate 2^(-level) float */
1176 lf = lp_build_sub(bld, const127, level);
1177 lf = lp_build_shl(bld, lf, const23);
1178 lf = LLVMBuildBitCast(builder, lf, fbld.vec_type, "");
1179
1180 /* finish shift operation by doing float mul */
1181 base_size = lp_build_int_to_float(&fbld, base_size);
1182 size = lp_build_mul(&fbld, base_size, lf);
1183 /*
1184 * do the max also with floats because
1185 * a) non-emulated int max requires sse41
1186 * (this is actually a lie, as we could cast to 16bit values
1187 * since 16bit is sufficient and 16bit int max is sse2)
1188 * b) with avx we can do int max 4-wide but float max 8-wide
1189 */
1190 size = lp_build_max(&fbld, size, fbld.one);
1191 size = lp_build_itrunc(&fbld, size);
1192 }
1193 return size;
1194 }
1195 }
1196
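/*
 * Editor's note: a scalar sketch (illustrative only) of the float-mul shift
 * emulation used above; texture dimensions are small enough that the float
 * product is exact:
 *
 *    uint32_t bits = (uint32_t)(127 - level) << 23;  // IEEE-754 bits of 2^-level
 *    float scale;
 *    memcpy(&scale, &bits, sizeof scale);
 *    float fsize = (float)base_size * scale;         // == base_size >> level
 *    int   size  = (int)MAX2(fsize, 1.0f);           // max(1, ...) done in float
 */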
1197
1198 /**
1199 * Dereference stride_array[mipmap_level] array to get a stride.
1200 * Return stride as a vector.
1201 */
1202 static LLVMValueRef
1203 lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
1204 LLVMValueRef stride_array, LLVMValueRef level)
1205 {
1206 LLVMBuilderRef builder = bld->gallivm->builder;
1207 LLVMValueRef indexes[2], stride, stride1;
1208 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
1209 if (bld->num_mips == 1) {
1210 indexes[1] = level;
1211 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1212 stride1 = LLVMBuildLoad(builder, stride1, "");
1213 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
1214 }
1215 else if (bld->num_mips == bld->coord_bld.type.length / 4) {
1216 LLVMValueRef stride1;
1217 unsigned i;
1218
1219 stride = bld->int_coord_bld.undef;
1220 for (i = 0; i < bld->num_mips; i++) {
1221 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1222 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
1223 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1224 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1225 stride1 = LLVMBuildLoad(builder, stride1, "");
1226 stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
1227 }
1228 stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0, 4);
1229 }
1230 else {
1231 LLVMValueRef stride1;
1232 unsigned i;
1233
1234 assert (bld->num_mips == bld->coord_bld.type.length);
1235
1236 stride = bld->int_coord_bld.undef;
1237 for (i = 0; i < bld->coord_bld.type.length; i++) {
1238 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1239 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
1240 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
1241 stride1 = LLVMBuildLoad(builder, stride1, "");
1242 stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
1243 }
1244 }
1245 return stride;
1246 }
1247
1248
1249 /**
1250 * When sampling a mipmap, we need to compute the width, height, depth
1251 * of the source levels from the level indexes. This helper function
1252 * does that.
1253 */
1254 void
1255 lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
1256 LLVMValueRef ilevel,
1257 LLVMValueRef *out_size,
1258 LLVMValueRef *row_stride_vec,
1259 LLVMValueRef *img_stride_vec)
1260 {
1261 const unsigned dims = bld->dims;
1262 LLVMValueRef ilevel_vec;
1263
1264 /*
1265 * Compute width, height, depth at mipmap level 'ilevel'
1266 */
1267 if (bld->num_mips == 1) {
1268 ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
1269 *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec, TRUE);
1270 }
1271 else {
1272 LLVMValueRef int_size_vec;
1273 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
1274 unsigned num_quads = bld->coord_bld.type.length / 4;
1275 unsigned i;
1276
1277 if (bld->num_mips == num_quads) {
1278 /*
1279 * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
1280 * intel "forgot" the variable shift count instruction until avx2.
1281 * A harmless 8x32 shift gets translated into 32 instructions
1282 * (16 extracts, 8 scalar shifts, 8 inserts); llvm is apparently
1283 * unable to recognize if there are really just 2 different shift
1284 * count values. So do the shift 4-wide before expansion.
1285 */
1286 struct lp_build_context bld4;
1287 struct lp_type type4;
1288
1289 type4 = bld->int_coord_bld.type;
1290 type4.length = 4;
1291
1292 lp_build_context_init(&bld4, bld->gallivm, type4);
1293
1294 if (bld->dims == 1) {
1295 assert(bld->int_size_in_bld.type.length == 1);
1296 int_size_vec = lp_build_broadcast_scalar(&bld4,
1297 bld->int_size);
1298 }
1299 else {
1300 assert(bld->int_size_in_bld.type.length == 4);
1301 int_size_vec = bld->int_size;
1302 }
1303
1304 for (i = 0; i < num_quads; i++) {
1305 LLVMValueRef ileveli;
1306 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1307
1308 ileveli = lp_build_extract_broadcast(bld->gallivm,
1309 bld->leveli_bld.type,
1310 bld4.type,
1311 ilevel,
1312 indexi);
1313 tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli, TRUE);
1314 }
1315 /*
1316 * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
1317 * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
1318 */
1319 *out_size = lp_build_concat(bld->gallivm,
1320 tmp,
1321 bld4.type,
1322 num_quads);
1323 }
1324 else {
1325 /* FIXME: this is terrible and results in a _huge_ vector
1326 * (for the dims > 1 case).
1327 * Should refactor this (together with extract_image_sizes) and do
1328 * something more useful. For instance, if we have width and height
1329 * in a 4-wide vector we could pack all elements into an 8xi16 vector
1330 * (on which we can still do useful math) instead of using a 16xi32
1331 * vector.
1332 * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
1333 * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
1334 */
1335 assert(bld->num_mips == bld->coord_bld.type.length);
1336 if (bld->dims == 1) {
1337 assert(bld->int_size_in_bld.type.length == 1);
1338 int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
1339 bld->int_size);
1340 *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel, FALSE);
1341 }
1342 else {
1343 LLVMValueRef ilevel1;
1344 for (i = 0; i < bld->num_mips; i++) {
1345 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
1346 ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
1347 bld->int_size_in_bld.type, ilevel, indexi);
1348 tmp[i] = bld->int_size;
1349 tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1, TRUE);
1350 }
1351 *out_size = lp_build_concat(bld->gallivm, tmp,
1352 bld->int_size_in_bld.type,
1353 bld->num_mips);
1354 }
1355 }
1356 }
1357
1358 if (dims >= 2) {
1359 *row_stride_vec = lp_build_get_level_stride_vec(bld,
1360 bld->row_stride_array,
1361 ilevel);
1362 }
1363 if (dims == 3 || has_layer_coord(bld->static_texture_state->target)) {
1364 *img_stride_vec = lp_build_get_level_stride_vec(bld,
1365 bld->img_stride_array,
1366 ilevel);
1367 }
1368 }
1369
1370
1371 /**
1372 * Extract and broadcast texture size.
1373 *
1374 * @param size_bld build context for the texture size vector (either
1375 * bld->int_size_bld or bld->float_size_bld)
1376 * @param coord_type type of the coordinate vector (either
1377 * bld->int_coord_type or bld->coord_type)
1378 * @param size vector with the texture size (width, height, depth)
1379 */
1380 void
1381 lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
1382 struct lp_build_context *size_bld,
1383 struct lp_type coord_type,
1384 LLVMValueRef size,
1385 LLVMValueRef *out_width,
1386 LLVMValueRef *out_height,
1387 LLVMValueRef *out_depth)
1388 {
1389 const unsigned dims = bld->dims;
1390 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1391 struct lp_type size_type = size_bld->type;
1392
1393 if (bld->num_mips == 1) {
1394 *out_width = lp_build_extract_broadcast(bld->gallivm,
1395 size_type,
1396 coord_type,
1397 size,
1398 LLVMConstInt(i32t, 0, 0));
1399 if (dims >= 2) {
1400 *out_height = lp_build_extract_broadcast(bld->gallivm,
1401 size_type,
1402 coord_type,
1403 size,
1404 LLVMConstInt(i32t, 1, 0));
1405 if (dims == 3) {
1406 *out_depth = lp_build_extract_broadcast(bld->gallivm,
1407 size_type,
1408 coord_type,
1409 size,
1410 LLVMConstInt(i32t, 2, 0));
1411 }
1412 }
1413 }
1414 else {
1415 unsigned num_quads = bld->coord_bld.type.length / 4;
1416
1417 if (dims == 1) {
1418 *out_width = size;
1419 }
1420 else if (bld->num_mips == num_quads) {
1421 *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4);
1422 if (dims >= 2) {
1423 *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4);
1424 if (dims == 3) {
1425 *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2, 4);
1426 }
1427 }
1428 }
1429 else {
1430 assert(bld->num_mips == bld->coord_type.length);
1431 *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1432 coord_type, size, 0);
1433 if (dims >= 2) {
1434 *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1435 coord_type, size, 1);
1436 if (dims == 3) {
1437 *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1438 coord_type, size, 2);
1439 }
1440 }
1441 }
1442 }
1443 }
1444
1445
1446 /**
1447 * Unnormalize coords.
1448 *
1449 * @param flt_size vector with the texture size as floats (width, height, depth)
1450 */
1451 void
1452 lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
1453 LLVMValueRef flt_size,
1454 LLVMValueRef *s,
1455 LLVMValueRef *t,
1456 LLVMValueRef *r)
1457 {
1458 const unsigned dims = bld->dims;
1459 LLVMValueRef width;
1460 LLVMValueRef height = NULL;
1461 LLVMValueRef depth = NULL;
1462
1463 lp_build_extract_image_sizes(bld,
1464 &bld->float_size_bld,
1465 bld->coord_type,
1466 flt_size,
1467 &width,
1468 &height,
1469 &depth);
1470
1471 /* s = s * width, t = t * height */
1472 *s = lp_build_mul(&bld->coord_bld, *s, width);
1473 if (dims >= 2) {
1474 *t = lp_build_mul(&bld->coord_bld, *t, height);
1475 if (dims >= 3) {
1476 *r = lp_build_mul(&bld->coord_bld, *r, depth);
1477 }
1478 }
1479 }
1480
1481 /**
1482 * Generate new coords and faces for cubemap texels falling off the face.
1483 *
1484 * @param face face (center) of the pixel
1485 * @param x0 lower x coord
1486 * @param x1 higher x coord (must be x0 + 1)
1487 * @param y0 lower y coord
1488 * @param y1 higher y coord (must be y0 + 1)
1489 * @param max_coord texture cube (level) size - 1
1490 * @param next_faces new face values when falling off
1491 * @param next_xcoords new x coord values when falling off
1492 * @param next_ycoords new y coord values when falling off
1493 *
1494 * The arrays hold the new values when under/overflow of
1495 * lower x, higher x, lower y, higher y coord would occur (in this order).
1496 * next_xcoords/next_ycoords have two entries each (for both new lower and
1497 * higher coord).
1498 */
1499 void
1500 lp_build_cube_new_coords(struct lp_build_context *ivec_bld,
1501 LLVMValueRef face,
1502 LLVMValueRef x0,
1503 LLVMValueRef x1,
1504 LLVMValueRef y0,
1505 LLVMValueRef y1,
1506 LLVMValueRef max_coord,
1507 LLVMValueRef next_faces[4],
1508 LLVMValueRef next_xcoords[4][2],
1509 LLVMValueRef next_ycoords[4][2])
1510 {
1511 /*
1512 * Lookup tables aren't nice for simd code, hence try some logic here.
1513 * (Note that while it would not be necessary to do per-sample (4) lookups
1514 * when using a LUT, as it's impossible for texels to fall off of positive
1515 * and negative edges simultaneously, it would however be necessary to
1516 * do 2 lookups for corner handling, as in that case texels fall off of
1517 * both the x and y axes.)
1518 */
1519 /*
1520 * Next faces (for face 012345):
1521 * x < 0.0 : 451110
1522 * x >= 1.0 : 540001
1523 * y < 0.0 : 225422
1524 * y >= 1.0 : 334533
1525 * Hence nfx+ (and nfy+) == nfx- (nfy-) xor 1
1526 * nfx-: face > 1 ? (face == 5 ? 0 : 1) : (4 + (face & 1))
1527 * nfy+: face & ~4 > 1 ? face + 2 : 3;
1528 * This could also use pshufb instead, but would need (manually coded)
1529 * ssse3 intrinsic (llvm won't do non-constant shuffles).
1530 */
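/*
 * Editor's note (worked example): for face 2 (+Y) the formulas above give
 * nfx- = 1 (face > 1, face != 5), nfx+ = 1 ^ 1 = 0, and since face & ~4 == 2 > 1,
 * nfy+ = 2 + 2 = 4 and nfy- = 4 ^ 1 = 5, matching the face-2 column of the
 * table (x<0: 1, x>=1: 0, y>=1: 4, y<0: 5).
 */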
1531 struct gallivm_state *gallivm = ivec_bld->gallivm;
1532 LLVMValueRef sel, sel_f2345, sel_f23, sel_f2, tmpsel, tmp;
1533 LLVMValueRef faceand1, sel_fand1, maxmx0, maxmx1, maxmy0, maxmy1;
1534 LLVMValueRef c2 = lp_build_const_int_vec(gallivm, ivec_bld->type, 2);
1535 LLVMValueRef c3 = lp_build_const_int_vec(gallivm, ivec_bld->type, 3);
1536 LLVMValueRef c4 = lp_build_const_int_vec(gallivm, ivec_bld->type, 4);
1537 LLVMValueRef c5 = lp_build_const_int_vec(gallivm, ivec_bld->type, 5);
1538
1539 sel = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c5);
1540 tmpsel = lp_build_select(ivec_bld, sel, ivec_bld->zero, ivec_bld->one);
1541 sel_f2345 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, face, ivec_bld->one);
1542 faceand1 = lp_build_and(ivec_bld, face, ivec_bld->one);
1543 tmp = lp_build_add(ivec_bld, faceand1, c4);
1544 next_faces[0] = lp_build_select(ivec_bld, sel_f2345, tmpsel, tmp);
1545 next_faces[1] = lp_build_xor(ivec_bld, next_faces[0], ivec_bld->one);
1546
1547 tmp = lp_build_andnot(ivec_bld, face, c4);
1548 sel_f23 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, tmp, ivec_bld->one);
1549 tmp = lp_build_add(ivec_bld, face, c2);
1550 next_faces[3] = lp_build_select(ivec_bld, sel_f23, tmp, c3);
1551 next_faces[2] = lp_build_xor(ivec_bld, next_faces[3], ivec_bld->one);
1552
1553 /*
1554 * new xcoords (for face 012345):
1555 * x < 0.0 : max max t max-t max max
1556 * x >= 1.0 : 0 0 max-t t 0 0
1557 * y < 0.0 : max 0 max-s s s max-s
1558 * y >= 1.0 : max 0 s max-s s max-s
1559 *
1560 * ncx[1] = face & ~4 > 1 ? (face == 2 ? max-t : t) : 0
1561 * ncx[0] = max - ncx[1]
1562 * ncx[3] = face > 1 ? (face & 1 ? max-s : s) : (face & 1) ? 0 : max
1563 * ncx[2] = face & ~4 > 1 ? max - ncx[3] : ncx[3]
1564 */
1565 sel_f2 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c2);
1566 maxmy0 = lp_build_sub(ivec_bld, max_coord, y0);
1567 tmp = lp_build_select(ivec_bld, sel_f2, maxmy0, y0);
1568 next_xcoords[1][0] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
1569 next_xcoords[0][0] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][0]);
1570 maxmy1 = lp_build_sub(ivec_bld, max_coord, y1);
1571 tmp = lp_build_select(ivec_bld, sel_f2, maxmy1, y1);
1572 next_xcoords[1][1] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero);
1573 next_xcoords[0][1] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][1]);
1574
1575 sel_fand1 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, faceand1, ivec_bld->one);
1576
1577 tmpsel = lp_build_select(ivec_bld, sel_fand1, ivec_bld->zero, max_coord);
1578 maxmx0 = lp_build_sub(ivec_bld, max_coord, x0);
1579 tmp = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
1580 next_xcoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
1581 tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][0]);
1582 next_xcoords[2][0] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][0]);
1583 maxmx1 = lp_build_sub(ivec_bld, max_coord, x1);
1584 tmp = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
1585 next_xcoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
1586 tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][1]);
1587 next_xcoords[2][1] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][1]);
1588
1589 /*
1590 * new ycoords (for face 012345):
1591 * x < 0.0 : t t 0 max t t
1592 * x >= 1.0 : t t 0 max t t
1593 * y < 0.0 : max-s s 0 max max 0
1594 * y >= 1.0 : s max-s 0 max 0 max
1595 *
1596 * ncy[0] = face & ~4 > 1 ? (face == 2 ? 0 : max) : t
1597 * ncy[1] = ncy[0]
1598 * ncy[3] = face > 1 ? (face & 1 ? max : 0) : (face & 1) ? max-s : s
1599 * ncy[2] = face & ~4 > 1 ? ncy[3] : max - ncy[3]
1600 */
1601 tmp = lp_build_select(ivec_bld, sel_f2, ivec_bld->zero, max_coord);
1602 next_ycoords[0][0] = lp_build_select(ivec_bld, sel_f23, tmp, y0);
1603 next_ycoords[1][0] = next_ycoords[0][0];
1604 next_ycoords[0][1] = lp_build_select(ivec_bld, sel_f23, tmp, y1);
1605 next_ycoords[1][1] = next_ycoords[0][1];
1606
1607 tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
1608 tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
1609 next_ycoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
1610 tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][0]);
1611 next_ycoords[2][0] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][0], tmp);
1612 tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
1613 tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
1614 next_ycoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
1615 tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][1]);
1616 next_ycoords[2][1] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][1], tmp);
1617 }
1618
1619
1620 /** Helper used by lp_build_cube_lookup() */
1621 static LLVMValueRef
1622 lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
1623 {
1624 /* ima = +0.5 / abs(coord); */
1625 LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1626 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1627 LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
1628 return ima;
1629 }
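/*
 * Scalar sketch (illustrative only, not compiled) of how the helper above is
 * used: the returned ima is 0.5 / |coord|, so for the major axis value ma a
 * minor-axis coordinate c projects to c * ima + 0.5, mapping [-|ma|, |ma|]
 * onto [0, 1]. The ref_* name below is hypothetical.
 */
#if 0
static float
ref_cube_project(float c, float ma)
{
   float absma = ma < 0.0f ? -ma : ma;
   float ima = 0.5f / absma;   /* what lp_build_cube_imapos() computes */
   return c * ima + 0.5f;      /* the posHalf add done in the callers */
}
#endif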
1630
1631
1632 /** Helper for doing 3-wise selection.
1633 * Returns sel1 ? val2 : (sel0 ? val0 : val1).
1634 */
1635 static LLVMValueRef
1636 lp_build_select3(struct lp_build_context *sel_bld,
1637 LLVMValueRef sel0,
1638 LLVMValueRef sel1,
1639 LLVMValueRef val0,
1640 LLVMValueRef val1,
1641 LLVMValueRef val2)
1642 {
1643 LLVMValueRef tmp;
1644 tmp = lp_build_select(sel_bld, sel0, val0, val1);
1645 return lp_build_select(sel_bld, sel1, val2, tmp);
1646 }
1647
1648
1649 /**
1650 * Generate code to do cube face selection and compute per-face texcoords.
1651 */
1652 void
1653 lp_build_cube_lookup(struct lp_build_sample_context *bld,
1654 LLVMValueRef *coords,
1655 const struct lp_derivatives *derivs_in, /* optional */
1656 LLVMValueRef *rho,
1657 struct lp_derivatives *derivs_out, /* optional */
1658 boolean need_derivs)
1659 {
1660 struct lp_build_context *coord_bld = &bld->coord_bld;
1661 LLVMBuilderRef builder = bld->gallivm->builder;
1662 struct gallivm_state *gallivm = bld->gallivm;
1663 LLVMValueRef si, ti, ri;
1664
1665 /*
1666 * Do per-pixel face selection. We cannot however (as we used to do)
1667 * simply calculate the derivs afterwards (which is very bogus for
1668 * explicit derivs btw) because the values would be "random" when
1669 * not all pixels lie on the same face. So what we do here is just
1670 * calculate the derivatives after scaling the coords by the absolute
1671 * value of the inverse major axis, and essentially do rho calculation
1672 * steps as if it were a 3d texture. This is perfect if all pixels hit
1673 * the same face, but not so great at edges; I believe the max error
1674 * should be sqrt(2) with no_rho_approx or 2 otherwise (essentially measuring
1675 * the 3d distance between 2 points on the cube instead of measuring up/down
1676 * the edge). Still this is possibly a win over just selecting the same face
1677 * for all pixels. Unfortunately, something like that doesn't work for
1678 * explicit derivatives.
1679 */
1680 struct lp_build_context *cint_bld = &bld->int_coord_bld;
1681 struct lp_type intctype = cint_bld->type;
1682 LLVMTypeRef coord_vec_type = coord_bld->vec_type;
1683 LLVMTypeRef cint_vec_type = cint_bld->vec_type;
1684 LLVMValueRef as, at, ar, face, face_s, face_t;
1685 LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
1686 LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1687 LLVMValueRef tnegi, rnegi;
1688 LLVMValueRef ma, mai, signma, signmabit, imahalfpos;
1689 LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1690 LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1691 1LL << (intctype.width - 1));
1692 LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1693 intctype.width -1);
1694 LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1695 LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1696 LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1697 LLVMValueRef s = coords[0];
1698 LLVMValueRef t = coords[1];
1699 LLVMValueRef r = coords[2];
1700
1701 assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1702 assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1703 assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1704
1705 /*
1706 * get absolute value (for x/y/z face selection) and sign bit
1707 * (for mirroring minor coords and pos/neg face selection)
1708 * of the original coords.
1709 */
1710 as = lp_build_abs(&bld->coord_bld, s);
1711 at = lp_build_abs(&bld->coord_bld, t);
1712 ar = lp_build_abs(&bld->coord_bld, r);
1713
1714 /*
1715 * major face determination: select x if x > y else select y
1716 * select z if z >= max(x,y) else select previous result
1717 * if some axes are equal we choose z over y, y over x - the
1718 * dx10 spec seems to ask for it while OpenGL doesn't care (if we
1719 * didn't care we could save a select or two if using different
1720 * compares and doing at_g_as_ar last since tnewx and tnewz are the
1721 * same).
1722 */
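   /*
    * In scalar terms the two compares plus lp_build_select3() below amount to:
    *   major = ar >= max(as, at) ? r : (as > at ? s : t)
    * (illustrative restatement only).
    */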
1723 as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
1724 maxasat = lp_build_max(coord_bld, as, at);
1725 ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
1726
1727 if (need_derivs && (derivs_in || (bld->no_quad_lod && bld->no_rho_approx))) {
1728 /*
1729 * XXX: This is really really complex.
1730 * It is a bit overkill to use this for implicit derivatives as well,
1731 * and almost certainly not worth the cost in practice, but it seems to be
1732 * the only way to get accurate, per-pixel lod values.
1733 */
1734 LLVMValueRef ima, imahalf, tmp, ddx[3], ddy[3];
1735 LLVMValueRef madx, mady, madxdivma, madydivma;
1736 LLVMValueRef sdxi, tdxi, rdxi, sdyi, tdyi, rdyi;
1737 LLVMValueRef tdxnegi, rdxnegi, tdynegi, rdynegi;
1738 LLVMValueRef sdxnewx, sdxnewy, sdxnewz, tdxnewx, tdxnewy, tdxnewz;
1739 LLVMValueRef sdynewx, sdynewy, sdynewz, tdynewx, tdynewy, tdynewz;
1740 LLVMValueRef face_sdx, face_tdx, face_sdy, face_tdy;
1741 /*
1742 * s = 1/2 * ( sc / ma + 1)
1743 * t = 1/2 * ( tc / ma + 1)
1744 *
1745 * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
1746 * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
1747 *
1748 * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
1749 * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
1750 * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
1751 * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
1752 */
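      /*
       * The last form is just the quotient rule factored so it maps onto the
       * code below: madxdivma = dx.ma / ma (i.e. madx * ima), then
       * dx.s = (dx.sc - sc * madxdivma) * imahalf, with imahalf = 0.5 / ma
       * (illustrative note, same math as above).
       */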
1753
1754 /* select ma, calculate ima */
1755 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1756 mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
1757 signmabit = LLVMBuildAnd(builder, mai, signmask, "");
1758 ima = lp_build_div(coord_bld, coord_bld->one, ma);
1759 imahalf = lp_build_mul(coord_bld, posHalf, ima);
1760 imahalfpos = lp_build_abs(coord_bld, imahalf);
1761
1762 if (!derivs_in) {
1763 ddx[0] = lp_build_ddx(coord_bld, s);
1764 ddx[1] = lp_build_ddx(coord_bld, t);
1765 ddx[2] = lp_build_ddx(coord_bld, r);
1766 ddy[0] = lp_build_ddy(coord_bld, s);
1767 ddy[1] = lp_build_ddy(coord_bld, t);
1768 ddy[2] = lp_build_ddy(coord_bld, r);
1769 }
1770 else {
1771 ddx[0] = derivs_in->ddx[0];
1772 ddx[1] = derivs_in->ddx[1];
1773 ddx[2] = derivs_in->ddx[2];
1774 ddy[0] = derivs_in->ddy[0];
1775 ddy[1] = derivs_in->ddy[1];
1776 ddy[2] = derivs_in->ddy[2];
1777 }
1778
1779 /* select major derivatives */
1780 madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddx[0], ddx[1], ddx[2]);
1781 mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddy[0], ddy[1], ddy[2]);
1782
1783 si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
1784 ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
1785 ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
1786
1787 sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
1788 tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
1789 rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");
1790
1791 sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
1792 tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
1793 rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");
1794
1795 /*
1796 * compute all possible new s/t coords, which does the mirroring,
1797 * and do the same for derivs minor axes.
1798 * snewx = signma * -r;
1799 * tnewx = -t;
1800 * snewy = s;
1801 * tnewy = signma * r;
1802 * snewz = signma * s;
1803 * tnewz = -t;
1804 */
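      /*
       * Note: the sign flips and the signma multiplies below are done as
       * integer xors on the float sign bit (signmask / signmabit), which is
       * why the coords and derivs were bitcast to the int coord type above.
       */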
1805 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1806 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1807 tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
1808 rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
1809 tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
1810 rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");
1811
1812 snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
1813 tnewx = tnegi;
1814 sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
1815 tdxnewx = tdxnegi;
1816 sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
1817 tdynewx = tdynegi;
1818
1819 snewy = si;
1820 tnewy = LLVMBuildXor(builder, signmabit, ri, "");
1821 sdxnewy = sdxi;
1822 tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
1823 sdynewy = sdyi;
1824 tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");
1825
1826 snewz = LLVMBuildXor(builder, signmabit, si, "");
1827 tnewz = tnegi;
1828 sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
1829 tdxnewz = tdxnegi;
1830 sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
1831 tdynewz = tdynegi;
1832
1833 /* select the mirrored values */
1834 face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
1835 face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
1836 face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
1837 face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdxnewx, sdxnewy, sdxnewz);
1838 face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdxnewx, tdxnewy, tdxnewz);
1839 face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdynewx, sdynewy, sdynewz);
1840 face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdynewx, tdynewy, tdynewz);
1841
1842 face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
1843 face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
1844 face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
1845 face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
1846 face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
1847 face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");
1848
1849 /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
1850 madxdivma = lp_build_mul(coord_bld, madx, ima);
1851 tmp = lp_build_mul(coord_bld, madxdivma, face_s);
1852 tmp = lp_build_sub(coord_bld, face_sdx, tmp);
1853 derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);
1854
1855 /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
1856 tmp = lp_build_mul(coord_bld, madxdivma, face_t);
1857 tmp = lp_build_sub(coord_bld, face_tdx, tmp);
1858 derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);
1859
1860 /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
1861 madydivma = lp_build_mul(coord_bld, mady, ima);
1862 tmp = lp_build_mul(coord_bld, madydivma, face_s);
1863 tmp = lp_build_sub(coord_bld, face_sdy, tmp);
1864 derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);
1865
1866 /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
1867 tmp = lp_build_mul(coord_bld, madydivma, face_t);
1868 tmp = lp_build_sub(coord_bld, face_tdy, tmp);
1869 derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);
1870
1871 signma = LLVMBuildLShr(builder, mai, signshift, "");
1872 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1873
1874 /* project coords */
1875 face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
1876 face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
1877
1878 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
1879 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
1880
1881 return;
1882 }
1883
1884 else if (need_derivs) {
1885 LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
1886 static const unsigned char swizzle0[] = { /* no-op swizzle */
1887 0, LP_BLD_SWIZZLE_DONTCARE,
1888 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1889 };
1890 static const unsigned char swizzle1[] = {
1891 1, LP_BLD_SWIZZLE_DONTCARE,
1892 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1893 };
1894 static const unsigned char swizzle01[] = { /* no-op swizzle */
1895 0, 1,
1896 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1897 };
1898 static const unsigned char swizzle23[] = {
1899 2, 3,
1900 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1901 };
1902 static const unsigned char swizzle02[] = {
1903 0, 2,
1904 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
1905 };
1906
1907 /*
1908 * scale the s/t/r coords pre-select/mirror so we can calculate
1909 * "reasonable" derivs.
1910 */
1911 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1912 imahalfpos = lp_build_cube_imapos(coord_bld, ma);
1913 s = lp_build_mul(coord_bld, s, imahalfpos);
1914 t = lp_build_mul(coord_bld, t, imahalfpos);
1915 r = lp_build_mul(coord_bld, r, imahalfpos);
1916
1917 /*
1918 * This isn't quite the same as the "ordinary" (3d deriv) path since we
1919 * know the texture is square, which simplifies things (the size mul,
1920 * which otherwise happens very early, can be omitted here entirely and
1921 * done at the very end).
1922 * Also always do calculations according to GALLIVM_DEBUG_NO_RHO_APPROX
1923 * since the error can get quite big otherwise at edges.
1924 * (With no_rho_approx max error is sqrt(2) at edges, same as it is
1925 * without no_rho_approx for 2d textures, otherwise it would be factor 2.)
1926 */
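      /*
       * In other words, for each quad the code below ends up with roughly
       *   rho = max(dx.s^2 + dx.t^2 + dx.r^2, dy.s^2 + dy.t^2 + dy.r^2)
       * of the scaled coords, i.e. still squared, as in the no_rho_approx
       * path (sketch of intent; see the swizzles for the exact packing).
       */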
1927 ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
1928 ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
1929
1930 ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
1931 ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
1932
1933 tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
1934 tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
1935 tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
1936
1937 rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
1938 rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);
1939
1940 tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
1941 tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
1942 *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
1943 }
1944
1945 if (!need_derivs) {
1946 ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
1947 }
1948 mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
1949 signmabit = LLVMBuildAnd(builder, mai, signmask, "");
1950
1951 si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
1952 ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
1953 ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
1954
1955 /*
1956 * compute all possible new s/t coords, which does the mirroring
1957 * snewx = signma * -r;
1958 * tnewx = -t;
1959 * snewy = s;
1960 * tnewy = signma * r;
1961 * snewz = signma * s;
1962 * tnewz = -t;
1963 */
1964 tnegi = LLVMBuildXor(builder, ti, signmask, "");
1965 rnegi = LLVMBuildXor(builder, ri, signmask, "");
1966
1967 snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
1968 tnewx = tnegi;
1969
1970 snewy = si;
1971 tnewy = LLVMBuildXor(builder, signmabit, ri, "");
1972
1973 snewz = LLVMBuildXor(builder, signmabit, si, "");
1974 tnewz = tnegi;
1975
1976 /* select the mirrored values */
1977 face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
1978 face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
1979 face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
1980
1981 face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
1982 face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
1983
1984 /* add +1 for neg face */
1985 /* XXX with AVX probably want to use another select here -
1986 * as long as we ensure vblendvps gets used we can actually
1987 * skip the comparison and just use sign as a "mask" directly.
1988 */
1989 signma = LLVMBuildLShr(builder, mai, signshift, "");
1990 coords[2] = LLVMBuildOr(builder, face, signma, "face");
1991
1992 /* project coords */
1993 if (!need_derivs) {
1994 imahalfpos = lp_build_cube_imapos(coord_bld, ma);
1995 face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
1996 face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
1997 }
1998
1999 coords[0] = lp_build_add(coord_bld, face_s, posHalf);
2000 coords[1] = lp_build_add(coord_bld, face_t, posHalf);
2001 }
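/*
 * Scalar reference (an illustrative sketch only, not compiled or used) of the
 * face selection, mirroring and projection generated above, for a single
 * (s, t, r) direction. The ref_* name is hypothetical.
 */
#if 0
static void
ref_cube_lookup(float s, float t, float r,
                unsigned *out_face, float *out_s, float *out_t)
{
   float as = s < 0.0f ? -s : s;
   float at = t < 0.0f ? -t : t;
   float ar = r < 0.0f ? -r : r;
   float ma, sc, tc;

   if (ar >= as && ar >= at) {
      /* +/- z major axis (ties go to z, then y, as noted above) */
      *out_face = r >= 0.0f ? PIPE_TEX_FACE_POS_Z : PIPE_TEX_FACE_NEG_Z;
      ma = ar;
      sc = r >= 0.0f ? s : -s;   /* snewz = signma * s */
      tc = -t;                   /* tnewz = -t */
   }
   else if (as > at) {
      /* +/- x major axis */
      *out_face = s >= 0.0f ? PIPE_TEX_FACE_POS_X : PIPE_TEX_FACE_NEG_X;
      ma = as;
      sc = s >= 0.0f ? -r : r;   /* snewx = signma * -r */
      tc = -t;                   /* tnewx = -t */
   }
   else {
      /* +/- y major axis */
      *out_face = t >= 0.0f ? PIPE_TEX_FACE_POS_Y : PIPE_TEX_FACE_NEG_Y;
      ma = at;
      sc = s;                    /* snewy = s */
      tc = t >= 0.0f ? r : -r;   /* tnewy = signma * r */
   }
   /* project: [-|ma|, |ma|] -> [0, 1] */
   *out_s = sc * (0.5f / ma) + 0.5f;
   *out_t = tc * (0.5f / ma) + 0.5f;
}
#endif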
2002
2003
2004 /**
2005 * Compute the partial offset of a pixel block along an arbitrary axis.
2006 *
2007 * @param coord coordinate in pixels
2008 * @param stride number of bytes between rows of successive pixel blocks
2009 * @param block_length number of pixels in a pixel block along the coordinate
2010 * axis
2011 * @param out_offset resulting relative offset of the pixel block in bytes
2012 * @param out_subcoord resulting sub-block pixel coordinate
2013 */
2014 void
2015 lp_build_sample_partial_offset(struct lp_build_context *bld,
2016 unsigned block_length,
2017 LLVMValueRef coord,
2018 LLVMValueRef stride,
2019 LLVMValueRef *out_offset,
2020 LLVMValueRef *out_subcoord)
2021 {
2022 LLVMBuilderRef builder = bld->gallivm->builder;
2023 LLVMValueRef offset;
2024 LLVMValueRef subcoord;
2025
2026 if (block_length == 1) {
2027 subcoord = bld->zero;
2028 }
2029 else {
2030 /*
2031 * Pixel blocks have power of two dimensions. LLVM should convert the
2032 * rem/div to bit arithmetic.
2033 * TODO: Verify this.
2034 * It does indeed, BUT it transforms the vector to scalar (and back) when
2035 * doing so (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
2036 * The generated code looks seriously unfunny and is quite expensive.
2037 */
2038 #if 0
2039 LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
2040 subcoord = LLVMBuildURem(builder, coord, block_width, "");
2041 coord = LLVMBuildUDiv(builder, coord, block_width, "");
2042 #else
2043 unsigned logbase2 = util_logbase2(block_length);
2044 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
2045 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
2046 subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
2047 coord = LLVMBuildLShr(builder, coord, block_shift, "");
2048 #endif
2049 }
2050
2051 offset = lp_build_mul(bld, coord, stride);
2052
2053 assert(out_offset);
2054 assert(out_subcoord);
2055
2056 *out_offset = offset;
2057 *out_subcoord = subcoord;
2058 }
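/*
 * Scalar sketch (illustrative only, not compiled) of the partial offset math
 * above for a power-of-two block_length; the ref_* name is hypothetical.
 */
#if 0
static void
ref_partial_offset(unsigned block_length, unsigned coord, unsigned stride,
                   unsigned *out_offset, unsigned *out_subcoord)
{
   *out_subcoord = coord & (block_length - 1);                    /* coord % block_length */
   *out_offset = (coord >> util_logbase2(block_length)) * stride; /* block index * stride */
}
#endif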
2059
2060
2061 /**
2062 * Compute the offset of a pixel block.
2063 *
2064 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
2065 *
2066 * Returns the relative offset and i,j sub-block coordinates
2067 */
2068 void
2069 lp_build_sample_offset(struct lp_build_context *bld,
2070 const struct util_format_description *format_desc,
2071 LLVMValueRef x,
2072 LLVMValueRef y,
2073 LLVMValueRef z,
2074 LLVMValueRef y_stride,
2075 LLVMValueRef z_stride,
2076 LLVMValueRef *out_offset,
2077 LLVMValueRef *out_i,
2078 LLVMValueRef *out_j)
2079 {
2080 LLVMValueRef x_stride;
2081 LLVMValueRef offset;
2082
2083 x_stride = lp_build_const_vec(bld->gallivm, bld->type,
2084 format_desc->block.bits/8);
2085
2086 lp_build_sample_partial_offset(bld,
2087 format_desc->block.width,
2088 x, x_stride,
2089 &offset, out_i);
2090
2091 if (y && y_stride) {
2092 LLVMValueRef y_offset;
2093 lp_build_sample_partial_offset(bld,
2094 format_desc->block.height,
2095 y, y_stride,
2096 &y_offset, out_j);
2097 offset = lp_build_add(bld, offset, y_offset);
2098 }
2099 else {
2100 *out_j = bld->zero;
2101 }
2102
2103 if (z && z_stride) {
2104 LLVMValueRef z_offset;
2105 LLVMValueRef k;
2106 lp_build_sample_partial_offset(bld,
2107 1, /* pixel blocks are always 2D */
2108 z, z_stride,
2109 &z_offset, &k);
2110 offset = lp_build_add(bld, offset, z_offset);
2111 }
2112
2113 *out_offset = offset;
2114 }
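/*
 * Scalar sketch (illustrative only, not compiled) of the complete offset
 * computation above, ignoring the NULL y/z handling; the ref_* name is
 * hypothetical.
 */
#if 0
static unsigned
ref_sample_offset(const struct util_format_description *format_desc,
                  unsigned x, unsigned y, unsigned z,
                  unsigned y_stride, unsigned z_stride,
                  unsigned *out_i, unsigned *out_j)
{
   unsigned x_stride = format_desc->block.bits / 8;

   *out_i = x % format_desc->block.width;
   *out_j = y % format_desc->block.height;

   return (x / format_desc->block.width) * x_stride +
          (y / format_desc->block.height) * y_stride +
          z * z_stride;   /* pixel blocks are always 2D, no block division for z */
}
#endif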