gallivm,llvmpipe: handle TXF (texelFetch) instruction, including offsets
src/gallium/auxiliary/gallivm/lp_bld_sample.c
1 /**************************************************************************
2 *
3 * Copyright 2009 VMware, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 /**
29 * @file
30 * Texture sampling -- common code.
31 *
32 * @author Jose Fonseca <jfonseca@vmware.com>
33 */
34
35 #include "pipe/p_defines.h"
36 #include "pipe/p_state.h"
37 #include "util/u_format.h"
38 #include "util/u_math.h"
39 #include "lp_bld_arit.h"
40 #include "lp_bld_const.h"
41 #include "lp_bld_debug.h"
42 #include "lp_bld_printf.h"
43 #include "lp_bld_flow.h"
44 #include "lp_bld_sample.h"
45 #include "lp_bld_swizzle.h"
46 #include "lp_bld_type.h"
47 #include "lp_bld_logic.h"
48 #include "lp_bld_pack.h"
49
50
51 /*
52 * Bri-linear factor. Should be greater than one.
53 */
54 #define BRILINEAR_FACTOR 2
55
56 /**
57 * Does the given texture wrap mode allow sampling the texture border color?
58 * XXX maybe move this into gallium util code.
59 */
60 boolean
61 lp_sampler_wrap_mode_uses_border_color(unsigned mode,
62 unsigned min_img_filter,
63 unsigned mag_img_filter)
64 {
65 switch (mode) {
66 case PIPE_TEX_WRAP_REPEAT:
67 case PIPE_TEX_WRAP_CLAMP_TO_EDGE:
68 case PIPE_TEX_WRAP_MIRROR_REPEAT:
69 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE:
70 return FALSE;
71 case PIPE_TEX_WRAP_CLAMP:
72 case PIPE_TEX_WRAP_MIRROR_CLAMP:
73 if (min_img_filter == PIPE_TEX_FILTER_NEAREST &&
74 mag_img_filter == PIPE_TEX_FILTER_NEAREST) {
75 return FALSE;
76 } else {
77 return TRUE;
78 }
79 case PIPE_TEX_WRAP_CLAMP_TO_BORDER:
80 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER:
81 return TRUE;
82 default:
83 assert(0 && "unexpected wrap mode");
84 return FALSE;
85 }
86 }
87
88
89 /**
90 * Initialize lp_sampler_static_state object with the gallium sampler
91 * and texture state.
92 * The former is considered to be static and the latter dynamic.
93 */
94 void
95 lp_sampler_static_state(struct lp_sampler_static_state *state,
96 const struct pipe_sampler_view *view,
97 const struct pipe_sampler_state *sampler)
98 {
99 const struct pipe_resource *texture;
100
101 memset(state, 0, sizeof *state);
102
103 if (!sampler || !view || !view->texture)
104 return;
105
106 texture = view->texture;
107
108 /*
109 * We don't copy sampler state over unless it is actually enabled, to avoid
110 * spurious recompiles, as the sampler static state is part of the shader
111 * key.
112 *
113 * Ideally the state tracker or cso_cache module would make all state
114 * canonical, but until that happens it's better to be safe than sorry here.
115 *
116 * XXX: Actually there's much more that can be done here, especially
117 * regarding 1D/2D/3D/CUBE textures, wrap modes, etc.
118 */
119
120 state->format = view->format;
121 state->swizzle_r = view->swizzle_r;
122 state->swizzle_g = view->swizzle_g;
123 state->swizzle_b = view->swizzle_b;
124 state->swizzle_a = view->swizzle_a;
125
126 state->target = texture->target;
127 state->pot_width = util_is_power_of_two(texture->width0);
128 state->pot_height = util_is_power_of_two(texture->height0);
129 state->pot_depth = util_is_power_of_two(texture->depth0);
130
131 state->wrap_s = sampler->wrap_s;
132 state->wrap_t = sampler->wrap_t;
133 state->wrap_r = sampler->wrap_r;
134 state->min_img_filter = sampler->min_img_filter;
135 state->mag_img_filter = sampler->mag_img_filter;
136
137 if (view->u.tex.last_level && sampler->max_lod > 0.0f) {
138 state->min_mip_filter = sampler->min_mip_filter;
139 } else {
140 state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
141 }
142
143 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
144 if (sampler->lod_bias != 0.0f) {
145 state->lod_bias_non_zero = 1;
146 }
147
148 /* If min_lod == max_lod we can greatly simplify mipmap selection.
149 * This is a case that occurs during automatic mipmap generation.
150 */
151 if (sampler->min_lod == sampler->max_lod) {
152 state->min_max_lod_equal = 1;
153 } else {
154 if (sampler->min_lod > 0.0f) {
155 state->apply_min_lod = 1;
156 }
157
158 if (sampler->max_lod < (float)view->u.tex.last_level) {
159 state->apply_max_lod = 1;
160 }
161 }
162 }
163
164 state->compare_mode = sampler->compare_mode;
165 if (sampler->compare_mode != PIPE_TEX_COMPARE_NONE) {
166 state->compare_func = sampler->compare_func;
167 }
168
169 state->normalized_coords = sampler->normalized_coords;
170
171 /*
172 * FIXME: Handle the remainder of pipe_sampler_view.
173 */
174 }
175
176
177 /**
178 * Generate code to compute coordinate gradient (rho).
179 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
180 *
181 * The resulting rho is scalar per quad.
182 */
183 static LLVMValueRef
184 lp_build_rho(struct lp_build_sample_context *bld,
185 unsigned unit,
186 const struct lp_derivatives *derivs)
187 {
188 struct gallivm_state *gallivm = bld->gallivm;
189 struct lp_build_context *int_size_bld = &bld->int_size_in_bld;
190 struct lp_build_context *float_size_bld = &bld->float_size_in_bld;
191 struct lp_build_context *float_bld = &bld->float_bld;
192 struct lp_build_context *coord_bld = &bld->coord_bld;
193 struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
194 const LLVMValueRef *ddx_ddy = derivs->ddx_ddy;
195 const unsigned dims = bld->dims;
196 LLVMBuilderRef builder = bld->gallivm->builder;
197 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
198 LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0);
199 LLVMValueRef index1 = LLVMConstInt(i32t, 1, 0);
200 LLVMValueRef index2 = LLVMConstInt(i32t, 2, 0);
201 LLVMValueRef rho_vec;
202 LLVMValueRef int_size, float_size;
203 LLVMValueRef rho;
204 LLVMValueRef first_level, first_level_vec;
205 LLVMValueRef abs_ddx_ddy[2];
206 unsigned length = coord_bld->type.length;
207 unsigned num_quads = length / 4;
208 unsigned i;
209 LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context));
210 LLVMValueRef rho_xvec, rho_yvec;
211
212 abs_ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]);
213 if (dims > 2) {
214 abs_ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]);
215 }
216 else {
217 abs_ddx_ddy[1] = NULL;
218 }
219
220 if (dims == 1) {
221 static const unsigned char swizzle1[] = {
222 0, LP_BLD_SWIZZLE_DONTCARE,
223 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
224 };
225 static const unsigned char swizzle2[] = {
226 1, LP_BLD_SWIZZLE_DONTCARE,
227 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
228 };
229 rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1);
230 rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2);
231 }
232 else if (dims == 2) {
233 static const unsigned char swizzle1[] = {
234 0, 2,
235 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
236 };
237 static const unsigned char swizzle2[] = {
238 1, 3,
239 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
240 };
241 rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1);
242 rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2);
243 }
244 else {
245 LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH];
246 LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH];
247 assert(dims == 3);
248 for (i = 0; i < num_quads; i++) {
249 shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i);
250 shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2);
251 shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i);
252 shuffles1[4*i + 3] = i32undef;
253 shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1);
254 shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3);
255 shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 1);
256 shuffles2[4*i + 3] = i32undef;
257 }
258 rho_xvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1],
259 LLVMConstVector(shuffles1, length), "");
260 rho_yvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1],
261 LLVMConstVector(shuffles2, length), "");
262 }
263
264 rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec);
265
266 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
267 bld->gallivm, unit);
268 first_level_vec = lp_build_broadcast_scalar(&bld->int_size_bld, first_level);
269 int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec);
270 float_size = lp_build_int_to_float(float_size_bld, int_size);
271
272 if (bld->coord_type.length > 4) {
273 /* expand size to each quad */
274 if (dims > 1) {
275 /* could use some broadcast_vector helper for this? */
276 int num_quads = bld->coord_type.length / 4;
277 LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4];
278 for (i = 0; i < num_quads; i++) {
279 src[i] = float_size;
280 }
281 float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads);
282 }
283 else {
284 float_size = lp_build_broadcast_scalar(coord_bld, float_size);
285 }
286 rho_vec = lp_build_mul(coord_bld, rho_vec, float_size);
287
288 if (dims <= 1) {
289 rho = rho_vec;
290 }
291 else {
292 if (dims >= 2) {
293 static const unsigned char swizzle1[] = {
294 0, LP_BLD_SWIZZLE_DONTCARE,
295 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
296 };
297 static const unsigned char swizzle2[] = {
298 1, LP_BLD_SWIZZLE_DONTCARE,
299 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
300 };
301 LLVMValueRef rho_s, rho_t, rho_r;
302
303 rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
304 rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2);
305
306 rho = lp_build_max(coord_bld, rho_s, rho_t);
307
308 if (dims >= 3) {
309 static const unsigned char swizzle3[] = {
310 2, LP_BLD_SWIZZLE_DONTCARE,
311 LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
312 };
313 rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle3);
314 rho = lp_build_max(coord_bld, rho, rho_r);
315 }
316 }
317 }
318 rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type,
319 perquadf_bld->type, rho, 0);
320 }
321 else {
322 if (dims <= 1) {
323 rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, "");
324 }
325 rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size);
326
327 if (dims <= 1) {
328 rho = rho_vec;
329 }
330 else {
331 if (dims >= 2) {
332 LLVMValueRef rho_s, rho_t, rho_r;
333
334 rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, "");
335 rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, "");
336
337 rho = lp_build_max(float_bld, rho_s, rho_t);
338
339 if (dims >= 3) {
340 rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, "");
341 rho = lp_build_max(float_bld, rho, rho_r);
342 }
343 }
344 }
345 }
346
347 return rho;
348 }
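/*
 * Worked numbers for the function above (illustrative, assuming the
 * per-quad [dsdx, dsdy, dtdx, dtdy] packing implied by the swizzles):
 * for a 256x128 2D texture mapped 1:1 to screen pixels the derivatives
 * are [1/256, 0, 0, 1/128], so rho_xvec = [1/256, 0] and
 * rho_yvec = [0, 1/128].  The per-channel max gives [1/256, 1/128], the
 * multiply by float_size [256, 128] gives [1, 1], and the final max
 * yields rho = 1, i.e. lod = log2(rho) = 0: the base level is sampled.
 */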
349
350
351 /*
352 * Bri-linear lod computation
353 *
354 * Use a piece-wise linear approximation of log2 such that:
355 * - round to nearest, for values in the neighborhood of -1, 0, 1, 2, etc.
356 * - linear approximation for values in the neighborhood of 0.5, 1.5, etc.,
357 * with the steepness specified in 'factor'
358 * - exact result for 0.5, 1.5, etc.
359 *
360 *
361 * 1.0 - /----*
362 * /
363 * /
364 * /
365 * 0.5 - *
366 * /
367 * /
368 * /
369 * 0.0 - *----/
370 *
371 * | |
372 * 2^0 2^1
373 *
374 * This is a technique also commonly used in hardware:
375 * - http://ixbtlabs.com/articles2/gffx/nv40-rx800-3.html
376 *
377 * TODO: For correctness, this should only be applied when texture is known to
378 * have regular mipmaps, i.e., mipmaps derived from the base level.
379 *
380 * TODO: This could be done in fixed point, where applicable.
381 */
382 static void
383 lp_build_brilinear_lod(struct lp_build_context *bld,
384 LLVMValueRef lod,
385 double factor,
386 LLVMValueRef *out_lod_ipart,
387 LLVMValueRef *out_lod_fpart)
388 {
389 LLVMValueRef lod_fpart;
390 double pre_offset = (factor - 0.5)/factor - 0.5;
391 double post_offset = 1 - factor;
392
393 if (0) {
394 lp_build_printf(bld->gallivm, "lod = %f\n", lod);
395 }
396
397 lod = lp_build_add(bld, lod,
398 lp_build_const_vec(bld->gallivm, bld->type, pre_offset));
399
400 lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart);
401
402 lod_fpart = lp_build_mul(bld, lod_fpart,
403 lp_build_const_vec(bld->gallivm, bld->type, factor));
404
405 lod_fpart = lp_build_add(bld, lod_fpart,
406 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
407
408 /*
409 * It's not necessary to clamp lod_fpart since:
410 * - the above expression will never produce numbers greater than one.
411 * - the mip filtering branch is only taken if lod_fpart is positive
412 */
413
414 *out_lod_fpart = lod_fpart;
415
416 if (0) {
417 lp_build_printf(bld->gallivm, "lod_ipart = %i\n", *out_lod_ipart);
418 lp_build_printf(bld->gallivm, "lod_fpart = %f\n\n", *out_lod_fpart);
419 }
420 }
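/*
 * A worked example for the function above, assuming BRILINEAR_FACTOR = 2:
 * pre_offset = (2 - 0.5)/2 - 0.5 = 0.25 and post_offset = 1 - 2 = -1.
 * For lod = 0.5: 0.5 + 0.25 = 0.75, so lod_ipart = 0 and
 * lod_fpart = 0.75 * 2 - 1 = 0.5, i.e. the result is exact at the
 * half-way points as promised.  For lod = 1.0: 1.0 + 0.25 = 1.25, so
 * lod_ipart = 1 and lod_fpart = 0.25 * 2 - 1 = -0.5; a non-positive
 * fpart means the mip filtering branch is skipped and the sample
 * effectively snaps to level 1 (the "round to nearest" flats).
 */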
421
422
423 /*
424 * Combined log2 and brilinear lod computation.
425 *
426 * It is identical in effect to calling lp_build_fast_log2() followed by
427 * lp_build_brilinear_lod() above, but by combining the two we can compute
428 * the integer and fractional parts independently.
429 */
430 static void
431 lp_build_brilinear_rho(struct lp_build_context *bld,
432 LLVMValueRef rho,
433 double factor,
434 LLVMValueRef *out_lod_ipart,
435 LLVMValueRef *out_lod_fpart)
436 {
437 LLVMValueRef lod_ipart;
438 LLVMValueRef lod_fpart;
439
440 const double pre_factor = (2*factor - 0.5)/(M_SQRT2*factor);
441 const double post_offset = 1 - 2*factor;
442
443 assert(bld->type.floating);
444
445 assert(lp_check_value(bld->type, rho));
446
447 /*
448 * The pre-factor will make the intersections with the exact powers of two
449 * happen precisely where we want them to be, which means that the integer
450 * part will not need any post adjustments.
451 */
452 rho = lp_build_mul(bld, rho,
453 lp_build_const_vec(bld->gallivm, bld->type, pre_factor));
454
455 /* ipart = ifloor(log2(rho)) */
456 lod_ipart = lp_build_extract_exponent(bld, rho, 0);
457
458 /* fpart = rho / 2**ipart */
459 lod_fpart = lp_build_extract_mantissa(bld, rho);
460
461 lod_fpart = lp_build_mul(bld, lod_fpart,
462 lp_build_const_vec(bld->gallivm, bld->type, factor));
463
464 lod_fpart = lp_build_add(bld, lod_fpart,
465 lp_build_const_vec(bld->gallivm, bld->type, post_offset));
466
467 /*
468 * Like lp_build_brilinear_lod, it's not necessary to clamp lod_fpart since:
469 * - the above expression will never produce numbers greater than one.
470 * - the mip filtering branch is only taken if lod_fpart is positive
471 */
472
473 *out_lod_ipart = lod_ipart;
474 *out_lod_fpart = lod_fpart;
475 }
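/*
 * Illustrative arithmetic for the function above, again assuming
 * BRILINEAR_FACTOR = 2: pre_factor = 3.5/(2*sqrt(2)) ~= 1.2374 and
 * post_offset = -3.  After the scale, lod_ipart is simply the float
 * exponent of rho, and with mantissa m in [1, 2) we get
 * lod_fpart = 2*m - 3, which ranges over [-1, 1) and crosses zero at
 * m = 1.5.  Interpolation between levels therefore only kicks in on the
 * upper half of each octave, reproducing the same ramp as
 * lp_build_brilinear_lod() without ever computing a full log2.
 */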
476
477
478 /**
479 * Generate code to compute texture level of detail (lambda).
480 * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y
481 * \param lod_bias optional float vector with the shader lod bias
482 * \param explicit_lod optional float vector with the explicit lod
486 *
487 * The resulting lod is scalar per quad, so only the first value per quad
488 * passed in via lod_bias or explicit_lod is used.
489 */
490 void
491 lp_build_lod_selector(struct lp_build_sample_context *bld,
492 unsigned unit,
493 const struct lp_derivatives *derivs,
494 LLVMValueRef lod_bias, /* optional */
495 LLVMValueRef explicit_lod, /* optional */
496 unsigned mip_filter,
497 LLVMValueRef *out_lod_ipart,
498 LLVMValueRef *out_lod_fpart)
500 {
501 LLVMBuilderRef builder = bld->gallivm->builder;
502 struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
503 LLVMValueRef lod;
504
505 *out_lod_ipart = bld->perquadi_bld.zero;
506 *out_lod_fpart = perquadf_bld->zero;
507
508 if (bld->static_state->min_max_lod_equal) {
509 /* User is forcing sampling from a particular mipmap level.
510 * This is hit during mipmap generation.
511 */
512 LLVMValueRef min_lod =
513 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);
514
515 lod = lp_build_broadcast_scalar(perquadf_bld, min_lod);
516 }
517 else {
518 if (explicit_lod) {
519 lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
520 perquadf_bld->type, explicit_lod, 0);
521 }
522 else {
523 LLVMValueRef rho;
524
525 rho = lp_build_rho(bld, unit, derivs);
526
527 /*
528 * Compute lod = log2(rho)
529 */
530
531 if (!lod_bias &&
532 !bld->static_state->lod_bias_non_zero &&
533 !bld->static_state->apply_max_lod &&
534 !bld->static_state->apply_min_lod) {
535 /*
536 * Special case when there are no post-log2 adjustments, which
537 * saves instructions by keeping the integer and fractional lod
538 * computations separate from the start.
539 */
540
541 if (mip_filter == PIPE_TEX_MIPFILTER_NONE ||
542 mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
543 *out_lod_ipart = lp_build_ilog2(perquadf_bld, rho);
544 *out_lod_fpart = perquadf_bld->zero;
545 return;
546 }
547 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR &&
548 !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
549 lp_build_brilinear_rho(perquadf_bld, rho, BRILINEAR_FACTOR,
550 out_lod_ipart, out_lod_fpart);
551 return;
552 }
553 }
554
555 if (0) {
556 lod = lp_build_log2(perquadf_bld, rho);
557 }
558 else {
559 lod = lp_build_fast_log2(perquadf_bld, rho);
560 }
561
562 /* add shader lod bias */
563 if (lod_bias) {
564 lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type,
565 perquadf_bld->type, lod_bias, 0);
566 lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias");
567 }
568 }
569
570 /* add sampler lod bias */
571 if (bld->static_state->lod_bias_non_zero) {
572 LLVMValueRef sampler_lod_bias =
573 bld->dynamic_state->lod_bias(bld->dynamic_state, bld->gallivm, unit);
574 sampler_lod_bias = lp_build_broadcast_scalar(perquadf_bld,
575 sampler_lod_bias);
576 lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias");
577 }
578
579 /* clamp lod */
580 if (bld->static_state->apply_max_lod) {
581 LLVMValueRef max_lod =
582 bld->dynamic_state->max_lod(bld->dynamic_state, bld->gallivm, unit);
583 max_lod = lp_build_broadcast_scalar(perquadf_bld, max_lod);
584
585 lod = lp_build_min(perquadf_bld, lod, max_lod);
586 }
587 if (bld->static_state->apply_min_lod) {
588 LLVMValueRef min_lod =
589 bld->dynamic_state->min_lod(bld->dynamic_state, bld->gallivm, unit);
590 min_lod = lp_build_broadcast_scalar(perquadf_bld, min_lod);
591
592 lod = lp_build_max(perquadf_bld, lod, min_lod);
593 }
594 }
595
596 if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
597 if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) {
598 lp_build_brilinear_lod(perquadf_bld, lod, BRILINEAR_FACTOR,
599 out_lod_ipart, out_lod_fpart);
600 }
601 else {
602 lp_build_ifloor_fract(perquadf_bld, lod, out_lod_ipart, out_lod_fpart);
603 }
604
605 lp_build_name(*out_lod_fpart, "lod_fpart");
606 }
607 else {
608 *out_lod_ipart = lp_build_iround(perquadf_bld, lod);
609 }
610
611 lp_build_name(*out_lod_ipart, "lod_ipart");
612
613 return;
614 }
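/*
 * An end-to-end example of the selector above (illustrative): say
 * rho = 3, a shader lod_bias of 0.5, no sampler bias, and lod clamped
 * to [0, 4].  Then lod = log2(3) + 0.5 ~= 2.085.  With
 * PIPE_TEX_MIPFILTER_NEAREST this rounds to lod_ipart = 2; with
 * PIPE_TEX_MIPFILTER_LINEAR (taking the non-brilinear ifloor_fract path
 * for clarity) it splits into lod_ipart = 2 and lod_fpart = 0.085, i.e.
 * levels 2 and 3 are blended with weight 0.085 towards level 3.
 */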
615
616
617 /**
618 * For PIPE_TEX_MIPFILTER_NEAREST, convert float LOD to integer
619 * mipmap level index.
620 * Note: this is all scalar per quad code.
621 * \param lod_ipart int texture level of detail
622 * \param level_out returns the clamped integer mipmap level
623 */
624 void
625 lp_build_nearest_mip_level(struct lp_build_sample_context *bld,
626 unsigned unit,
627 LLVMValueRef lod_ipart,
628 LLVMValueRef *level_out)
629 {
630 struct lp_build_context *perquadi_bld = &bld->perquadi_bld;
631 LLVMValueRef first_level, last_level, level;
632
633 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
634 bld->gallivm, unit);
635 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
636 bld->gallivm, unit);
637 first_level = lp_build_broadcast_scalar(perquadi_bld, first_level);
638 last_level = lp_build_broadcast_scalar(perquadi_bld, last_level);
639
640 level = lp_build_add(perquadi_bld, lod_ipart, first_level);
641
642 /* clamp level to legal range of levels */
643 *level_out = lp_build_clamp(perquadi_bld, level, first_level, last_level);
644 }
645
646
647 /**
648 * For PIPE_TEX_MIPFILTER_LINEAR, convert the per-quad int LOD(s) to two
649 * adjacent per-quad mipmap level indexes, and fix up the float lod part accordingly.
650 * Later, we'll sample from those two mipmap levels and interpolate between them.
651 */
652 void
653 lp_build_linear_mip_levels(struct lp_build_sample_context *bld,
654 unsigned unit,
655 LLVMValueRef lod_ipart,
656 LLVMValueRef *lod_fpart_inout,
657 LLVMValueRef *level0_out,
658 LLVMValueRef *level1_out)
659 {
660 LLVMBuilderRef builder = bld->gallivm->builder;
661 struct lp_build_context *perquadi_bld = &bld->perquadi_bld;
662 struct lp_build_context *perquadf_bld = &bld->perquadf_bld;
663 LLVMValueRef first_level, last_level;
664 LLVMValueRef clamp_min;
665 LLVMValueRef clamp_max;
666
667 first_level = bld->dynamic_state->first_level(bld->dynamic_state,
668 bld->gallivm, unit);
669 last_level = bld->dynamic_state->last_level(bld->dynamic_state,
670 bld->gallivm, unit);
671 first_level = lp_build_broadcast_scalar(perquadi_bld, first_level);
672 last_level = lp_build_broadcast_scalar(perquadi_bld, last_level);
673
674 *level0_out = lp_build_add(perquadi_bld, lod_ipart, first_level);
675 *level1_out = lp_build_add(perquadi_bld, *level0_out, perquadi_bld->one);
676
677 /*
678 * Clamp both *level0_out and *level1_out to [first_level, last_level], with
679 * the minimum number of comparisons, zeroing lod_fpart at the extreme
680 * ends in the process.
681 */
682
683 /*
684 * This code (vector select in particular) only works with llvm 3.1
685 * (if there's more than one quad, with x86 backend). Might consider
686 * converting to our lp_bld_logic helpers.
687 */
688 #if HAVE_LLVM < 0x0301
689 assert(perquadi_bld->type.length == 1);
690 #endif
691
692 /* *level0_out < first_level */
693 clamp_min = LLVMBuildICmp(builder, LLVMIntSLT,
694 *level0_out, first_level,
695 "clamp_lod_to_first");
696
697 *level0_out = LLVMBuildSelect(builder, clamp_min,
698 first_level, *level0_out, "");
699
700 *level1_out = LLVMBuildSelect(builder, clamp_min,
701 first_level, *level1_out, "");
702
703 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min,
704 perquadf_bld->zero, *lod_fpart_inout, "");
705
706 /* *level0_out >= last_level */
707 clamp_max = LLVMBuildICmp(builder, LLVMIntSGE,
708 *level0_out, last_level,
709 "clamp_lod_to_last");
710
711 *level0_out = LLVMBuildSelect(builder, clamp_max,
712 last_level, *level0_out, "");
713
714 *level1_out = LLVMBuildSelect(builder, clamp_max,
715 last_level, *level1_out, "");
716
717 *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max,
718 perquadf_bld->zero, *lod_fpart_inout, "");
719
720 lp_build_name(*level0_out, "sampler%u_miplevel0", unit);
721 lp_build_name(*level1_out, "sampler%u_miplevel1", unit);
722 lp_build_name(*lod_fpart_inout, "sampler%u_mipweight", unit);
723 }
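/*
 * A clamping example for the function above: with first_level = 0,
 * last_level = 4, lod_ipart = -1 and lod_fpart = 0.3, level0 starts out
 * as -1, so clamp_min fires: level0 = level1 = 0 and lod_fpart = 0,
 * degenerating into a plain base-level fetch with no blending.
 * Symmetrically, lod_ipart = 4 trips clamp_max (level0 >= last_level):
 * level0 = level1 = 4 and lod_fpart = 0, so no out-of-range level is
 * ever touched and no redundant blend is done.
 */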
724
725
726 /**
727 * Return pointer to a single mipmap level.
728 * \param level integer mipmap level
729 */
730 LLVMValueRef
731 lp_build_get_mipmap_level(struct lp_build_sample_context *bld,
732 LLVMValueRef level)
733 {
734 LLVMBuilderRef builder = bld->gallivm->builder;
735 LLVMValueRef indexes[2], data_ptr, mip_offset;
736
737 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
738 indexes[1] = level;
739 mip_offset = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
740 mip_offset = LLVMBuildLoad(builder, mip_offset, "");
741 data_ptr = LLVMBuildGEP(builder, bld->base_ptr, &mip_offset, 1, "");
742 return data_ptr;
743 }
744
745 /**
746 * Return (per-pixel) offsets to mip levels.
747 * \param level integer mipmap level
748 */
749 LLVMValueRef
750 lp_build_get_mip_offsets(struct lp_build_sample_context *bld,
751 LLVMValueRef level)
752 {
753 LLVMBuilderRef builder = bld->gallivm->builder;
754 LLVMValueRef indexes[2], offsets, offset1;
755
756 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
757 if (bld->num_lods == 1) {
758 indexes[1] = level;
759 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
760 offset1 = LLVMBuildLoad(builder, offset1, "");
761 offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1);
762 }
763 else if (bld->num_lods == bld->coord_bld.type.length / 4) {
764 unsigned i;
765
766 offsets = bld->int_coord_bld.undef;
767 for (i = 0; i < bld->num_lods; i++) {
768 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
769 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
770 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
771 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
772 offset1 = LLVMBuildLoad(builder, offset1, "");
773 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexo, "");
774 }
775 offsets = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, offsets, 0);
776 }
777 else {
778 unsigned i;
779
780 assert (bld->num_lods == bld->coord_bld.type.length);
781
782 offsets = bld->int_coord_bld.undef;
783 for (i = 0; i < bld->num_lods; i++) {
784 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
785 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
786 offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, "");
787 offset1 = LLVMBuildLoad(builder, offset1, "");
788 offsets = LLVMBuildInsertElement(builder, offsets, offset1, indexi, "");
789 }
790 }
791 return offsets;
792 }
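/*
 * Layout example (illustrative): for an 8-wide coord vector (two quads)
 * with num_lods == 2 and per-quad levels [2, 3], the middle branch above
 * loads mip_offsets[2] and mip_offsets[3] and inserts them at elements
 * 0 and 4 (note indexo = 4 * i); the final lp_build_swizzle_scalar_aos()
 * then broadcasts element 0 of each group of four, yielding
 * [o2 o2 o2 o2 o3 o3 o3 o3] -- one offset per pixel.  The stride fetch
 * in lp_build_get_level_stride_vec() below relies on the same
 * insert-at-4*i-then-swizzle trick.
 */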
793
794
795 /**
796 * Codegen equivalent for u_minify().
797 * Return max(1, base_size >> level);
798 */
799 LLVMValueRef
800 lp_build_minify(struct lp_build_context *bld,
801 LLVMValueRef base_size,
802 LLVMValueRef level)
803 {
804 LLVMBuilderRef builder = bld->gallivm->builder;
805 assert(lp_check_value(bld->type, base_size));
806 assert(lp_check_value(bld->type, level));
807
808 if (level == bld->zero) {
809 /* if we're using mipmap level zero, no minification is needed */
810 return base_size;
811 }
812 else {
813 LLVMValueRef size =
814 LLVMBuildLShr(builder, base_size, level, "minify");
815 assert(bld->type.sign);
816 size = lp_build_max(bld, size, bld->one);
817 return size;
818 }
819 }
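/*
 * A minimal scalar sketch of what the code above generates, matching
 * u_minify() semantics (hypothetical helper, not mesa API).  E.g. a
 * 64-texel axis at level 3 gives 64 >> 3 = 8, while a 4-texel axis at
 * level 5 gives 4 >> 5 = 0, which is then clamped up to 1.
 */
static INLINE int
example_minify_scalar(int base_size, int level)
{
   int size = base_size >> level;   /* assumes non-negative base_size */
   return size > 1 ? size : 1;      /* max(1, base_size >> level) */
}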
820
821
822 /**
823 * Dereference stride_array[mipmap_level] array to get a stride.
824 * Return stride as a vector.
825 */
826 static LLVMValueRef
827 lp_build_get_level_stride_vec(struct lp_build_sample_context *bld,
828 LLVMValueRef stride_array, LLVMValueRef level)
829 {
830 LLVMBuilderRef builder = bld->gallivm->builder;
831 LLVMValueRef indexes[2], stride, stride1;
832 indexes[0] = lp_build_const_int32(bld->gallivm, 0);
833 if (bld->num_lods == 1) {
834 indexes[1] = level;
835 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
836 stride1 = LLVMBuildLoad(builder, stride1, "");
837 stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1);
838 }
839 else if (bld->num_lods == bld->coord_bld.type.length / 4) {
840 LLVMValueRef stride1;
841 unsigned i;
842
843 stride = bld->int_coord_bld.undef;
844 for (i = 0; i < bld->num_lods; i++) {
845 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
846 LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i);
847 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
848 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
849 stride1 = LLVMBuildLoad(builder, stride1, "");
850 stride = LLVMBuildInsertElement(builder, stride, stride1, indexo, "");
851 }
852 stride = lp_build_swizzle_scalar_aos(&bld->int_coord_bld, stride, 0);
853 }
854 else {
855 LLVMValueRef stride1;
856 unsigned i;
857
858 assert (bld->num_lods == bld->coord_bld.type.length);
859
860 stride = bld->int_coord_bld.undef;
861 for (i = 0; i < bld->coord_bld.type.length; i++) {
862 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
863 indexes[1] = LLVMBuildExtractElement(builder, level, indexi, "");
864 stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, "");
865 stride1 = LLVMBuildLoad(builder, stride1, "");
866 stride = LLVMBuildInsertElement(builder, stride, stride1, indexi, "");
867 }
868 }
869 return stride;
870 }
871
872
873 /**
874 * When sampling a mipmap, we need to compute the width, height, depth
875 * of the source levels from the level indexes. This helper function
876 * does that.
877 */
878 void
879 lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld,
880 LLVMValueRef ilevel,
881 LLVMValueRef *out_size,
882 LLVMValueRef *row_stride_vec,
883 LLVMValueRef *img_stride_vec)
884 {
885 const unsigned dims = bld->dims;
886 LLVMValueRef ilevel_vec;
887
888 /*
889 * Compute width, height, depth at mipmap level 'ilevel'
890 */
891 if (bld->num_lods == 1) {
892 ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel);
893 *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec);
894 }
895 else {
896 LLVMValueRef int_size_vec;
897 LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
898 unsigned num_quads = bld->coord_bld.type.length / 4;
899 unsigned i;
900
901 if (bld->num_lods == num_quads) {
902 /*
903 * XXX: this should be #ifndef SANE_INSTRUCTION_SET.
904 * intel "forgot" the variable shift count instruction until avx2.
905 * A harmless 8x32 shift gets translated into 32 instructions
906 * (16 extracts, 8 scalar shifts, 8 inserts), llvm is apparently
907 * unable to recognize if there are really just 2 different shift
908 * count values. So do the shift 4-wide before expansion.
909 */
910 struct lp_build_context bld4;
911 struct lp_type type4;
912
913 type4 = bld->int_coord_bld.type;
914 type4.length = 4;
915
916 lp_build_context_init(&bld4, bld->gallivm, type4);
917
918 if (bld->dims == 1) {
919 assert(bld->int_size_in_bld.type.length == 1);
920 int_size_vec = lp_build_broadcast_scalar(&bld4,
921 bld->int_size);
922 }
923 else {
924 assert(bld->int_size_in_bld.type.length == 4);
925 int_size_vec = bld->int_size;
926 }
927
928 for (i = 0; i < num_quads; i++) {
929 LLVMValueRef ileveli;
930 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
931
932 ileveli = lp_build_extract_broadcast(bld->gallivm,
933 bld->perquadi_bld.type,
934 bld4.type,
935 ilevel,
936 indexi);
937 tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli);
938 }
939 /*
940 * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1,
941 * [w0, w0, w0, w0, w1, w1, w1, w1, ...] otherwise.
942 */
943 *out_size = lp_build_concat(bld->gallivm,
944 tmp,
945 bld4.type,
946 num_quads);
947 }
948 else {
949 /* FIXME: this is terrible and results in a _huge_ vector
950 * (for the dims > 1 case).
951 * Should refactor this (together with extract_image_sizes) and do
952 * something more useful. For instance, if we have width,height in
953 * a 4-wide vector we could pack all elements into an 8xi16 vector
954 * (on which we can still do useful math) instead of using a 16xi32
955 * vector.
956 * FIXME: some callers can't handle this yet.
957 * For dims == 1 this will create [w0, w1, w2, w3, ...] vector.
958 * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector.
959 */
960 assert(bld->num_lods == bld->coord_bld.type.length);
961 if (bld->dims == 1) {
962 assert(bld->int_size_bld.type.length == 1);
963 int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld,
964 bld->int_size);
965 /* vector shift with variable shift count alert... */
966 *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel);
967 }
968 else {
969 LLVMValueRef ilevel1;
970 for (i = 0; i < bld->num_lods; i++) {
971 LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i);
972 ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type,
973 bld->int_size_in_bld.type, ilevel, indexi);
974 tmp[i] = bld->int_size;
975 tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1);
976 }
977 *out_size = lp_build_concat(bld->gallivm,
978 tmp,
979 bld->int_size_in_bld.type,
980 bld->num_lods);
981 }
982 }
983 }
984
985 if (dims >= 2) {
986 *row_stride_vec = lp_build_get_level_stride_vec(bld,
987 bld->row_stride_array,
988 ilevel);
989 if (dims == 3 || bld->static_state->target == PIPE_TEXTURE_CUBE) {
990 *img_stride_vec = lp_build_get_level_stride_vec(bld,
991 bld->img_stride_array,
992 ilevel);
993 }
994 }
995 }
996
997
998 /**
999 * Extract and broadcast texture size.
1000 *
1001 * @param size_type type of the texture size vector (either
1002 * bld->int_size_type or bld->float_size_type)
1003 * @param coord_type type of the texture coordinate vector (either
1004 * bld->int_coord_type or bld->coord_type)
1005 * @param size vector with the texture size (width, height, depth)
1006 */
1007 void
1008 lp_build_extract_image_sizes(struct lp_build_sample_context *bld,
1009 struct lp_build_context *size_bld,
1010 struct lp_type coord_type,
1011 LLVMValueRef size,
1012 LLVMValueRef *out_width,
1013 LLVMValueRef *out_height,
1014 LLVMValueRef *out_depth)
1015 {
1016 const unsigned dims = bld->dims;
1017 LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context);
1018 struct lp_type size_type = size_bld->type;
1019
1020 if (bld->num_lods == 1) {
1021 *out_width = lp_build_extract_broadcast(bld->gallivm,
1022 size_type,
1023 coord_type,
1024 size,
1025 LLVMConstInt(i32t, 0, 0));
1026 if (dims >= 2) {
1027 *out_height = lp_build_extract_broadcast(bld->gallivm,
1028 size_type,
1029 coord_type,
1030 size,
1031 LLVMConstInt(i32t, 1, 0));
1032 if (dims == 3) {
1033 *out_depth = lp_build_extract_broadcast(bld->gallivm,
1034 size_type,
1035 coord_type,
1036 size,
1037 LLVMConstInt(i32t, 2, 0));
1038 }
1039 }
1040 }
1041 else {
1042 unsigned num_quads = bld->coord_bld.type.length / 4;
1043
1044 if (dims == 1) {
1045 *out_width = size;
1046 }
1047 else if (bld->num_lods == num_quads) {
1048 *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0);
1049 if (dims >= 2) {
1050 *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1);
1051 if (dims == 3) {
1052 *out_depth = lp_build_swizzle_scalar_aos(size_bld, size, 2);
1053 }
1054 }
1055 }
1056 else {
1057 assert(bld->num_lods == bld->coord_type.length);
1058 *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1059 coord_type, size, 0);
1060 if (dims >= 2) {
1061 *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1062 coord_type, size, 1);
1063 if (dims == 3) {
1064 *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type,
1065 coord_type, size, 2);
1066 }
1067 }
1068 }
1069 }
1070 }
1071
1072
1073 /**
1074 * Unnormalize coords.
1075 *
1076 * @param flt_size vector with the float texture size (width, height, depth)
1077 */
1078 void
1079 lp_build_unnormalized_coords(struct lp_build_sample_context *bld,
1080 LLVMValueRef flt_size,
1081 LLVMValueRef *s,
1082 LLVMValueRef *t,
1083 LLVMValueRef *r)
1084 {
1085 const unsigned dims = bld->dims;
1086 LLVMValueRef width;
1087 LLVMValueRef height;
1088 LLVMValueRef depth;
1089
1090 lp_build_extract_image_sizes(bld,
1091 &bld->float_size_bld,
1092 bld->coord_type,
1093 flt_size,
1094 &width,
1095 &height,
1096 &depth);
1097
1098 /* s = s * width, t = t * height */
1099 *s = lp_build_mul(&bld->coord_bld, *s, width);
1100 if (dims >= 2) {
1101 *t = lp_build_mul(&bld->coord_bld, *t, height);
1102 if (dims >= 3) {
1103 *r = lp_build_mul(&bld->coord_bld, *r, depth);
1104 }
1105 }
1106 }
1107
1108
1109 /** Helper used by lp_build_cube_lookup() */
1110 static LLVMValueRef
1111 lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
1112 {
1113 /* ima = +0.5 / abs(coord); */
1114 LLVMValueRef posHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1115 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1116 LLVMValueRef ima = lp_build_div(coord_bld, posHalf, absCoord);
1117 return ima;
1118 }
1119
1120 /** Helper used by lp_build_cube_lookup() */
1121 static LLVMValueRef
1122 lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
1123 {
1124 /* ima = -0.5 / abs(coord); */
1125 LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
1126 LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
1127 LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
1128 return ima;
1129 }
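/*
 * A note on the two helpers above (illustrative): the face coordinate is
 * later computed as coord * ima + 0.5, so with ima = 0.5/|ma| this maps
 * coord/|ma|, which lies in [-1, 1] on the selected face, onto [0, 1].
 * E.g. ma = -0.8 and coord = 0.3 gives ima = 0.625 and a face coord of
 * 0.3 * 0.625 + 0.5 = 0.6875.  The negative variant simply folds a
 * required sign flip into ima instead of negating the coordinate.
 */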
1130
1131 /**
1132 * Helper used by lp_build_cube_lookup()
1133 * FIXME: the sign here can also be 0.
1134 * Arithmetically this could definitely make a difference. Either
1135 * fix the comment or use another (simpler) sign function; not sure
1136 * which it should be.
1137 * \param sign scalar +1 or -1
1138 * \param coord float vector
1139 * \param ima float vector
1140 */
1141 static LLVMValueRef
1142 lp_build_cube_coord(struct lp_build_context *coord_bld,
1143 LLVMValueRef sign, int negate_coord,
1144 LLVMValueRef coord, LLVMValueRef ima)
1145 {
1146 /* return negate(coord) * ima * sign + 0.5; */
1147 LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
1148 LLVMValueRef res;
1149
1150 assert(negate_coord == +1 || negate_coord == -1);
1151
1152 if (negate_coord == -1) {
1153 coord = lp_build_negate(coord_bld, coord);
1154 }
1155
1156 res = lp_build_mul(coord_bld, coord, ima);
1157 if (sign) {
1158 sign = lp_build_broadcast_scalar(coord_bld, sign);
1159 res = lp_build_mul(coord_bld, res, sign);
1160 }
1161 res = lp_build_add(coord_bld, res, half);
1162
1163 return res;
1164 }
1165
1166
1167 /** Helper used by lp_build_cube_lookup()
1168 * Return (major_coord >= 0) ? pos_face : neg_face;
1169 */
1170 static LLVMValueRef
1171 lp_build_cube_face(struct lp_build_sample_context *bld,
1172 LLVMValueRef major_coord,
1173 unsigned pos_face, unsigned neg_face)
1174 {
1175 struct gallivm_state *gallivm = bld->gallivm;
1176 LLVMBuilderRef builder = gallivm->builder;
1177 LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
1178 major_coord,
1179 bld->float_bld.zero, "");
1180 LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
1181 LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
1182 LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
1183 return res;
1184 }
1185
1186
1187
1188 /**
1189 * Generate code to do cube face selection and compute per-face texcoords.
1190 */
1191 void
1192 lp_build_cube_lookup(struct lp_build_sample_context *bld,
1193 LLVMValueRef s,
1194 LLVMValueRef t,
1195 LLVMValueRef r,
1196 LLVMValueRef *face,
1197 LLVMValueRef *face_s,
1198 LLVMValueRef *face_t)
1199 {
1200 struct lp_build_context *coord_bld = &bld->coord_bld;
1201 LLVMBuilderRef builder = bld->gallivm->builder;
1202 struct gallivm_state *gallivm = bld->gallivm;
1203 LLVMValueRef rx, ry, rz;
1204 LLVMValueRef tmp[4], rxyz, arxyz;
1205
1206 /*
1207 * Use the average of the four pixels' texcoords to choose the face.
1208 * As a slight simplification, just calculate the sum and skip the scaling.
1209 */
1210 tmp[0] = s;
1211 tmp[1] = t;
1212 tmp[2] = r;
1213 rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
1214 arxyz = lp_build_abs(&bld->coord_bld, rxyz);
1215
1216 if (coord_bld->type.length > 4) {
1217 struct lp_build_context *cint_bld = &bld->int_coord_bld;
1218 struct lp_type intctype = cint_bld->type;
1219 LLVMValueRef signrxs, signrys, signrzs, signrxyz, sign;
1220 LLVMValueRef arxs, arys, arzs;
1221 LLVMValueRef arx_ge_ary, maxarxsarys, arz_ge_arx_ary;
1222 LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
1223 LLVMValueRef ryneg, rzneg;
1224 LLVMValueRef ma, ima;
1225 LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
1226 LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
1227 1 << (intctype.width - 1));
1228 LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
1229 intctype.width -1);
1230 LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
1231 LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
1232 LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
1233
1234 assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
1235 assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
1236 assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
1237
1238 rx = LLVMBuildBitCast(builder, s, lp_build_vec_type(gallivm, intctype), "");
1239 ry = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
1240 rz = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
1241 ryneg = LLVMBuildXor(builder, ry, signmask, "");
1242 rzneg = LLVMBuildXor(builder, rz, signmask, "");
1243
1244 /* the sign bit comes from the averaged vector (per quad),
1245 * as does the decision of which face to use */
1246 signrxyz = LLVMBuildBitCast(builder, rxyz, lp_build_vec_type(gallivm, intctype), "");
1247 signrxyz = LLVMBuildAnd(builder, signrxyz, signmask, "");
1248
1249 arxs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 0);
1250 arys = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 1);
1251 arzs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 2);
1252
1253 /*
1254 * select x if x >= y else select y
1255 * select previous result if y >= max(x,y) else select z
1256 */
1257 arx_ge_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, arxs, arys);
1258 maxarxsarys = lp_build_max(coord_bld, arxs, arys);
1259 arz_ge_arx_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, maxarxsarys, arzs);
1260
1261 /*
1262 * compute all possible new s/t coords
1263 * snewx = signrx * -rz;
1264 * tnewx = -ry;
1265 * snewy = rx;
1266 * tnewy = signry * rz;
1267 * snewz = signrz * rx;
1268 * tnewz = -ry;
1269 */
1270 signrxs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 0);
1271 snewx = LLVMBuildXor(builder, signrxs, rzneg, "");
1272 tnewx = ryneg;
1273
1274 signrys = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 1);
1275 snewy = rx;
1276 tnewy = LLVMBuildXor(builder, signrys, rz, "");
1277
1278 signrzs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 2);
1279 snewz = LLVMBuildXor(builder, signrzs, rx, "");
1280 tnewz = ryneg;
1281
1282 /* XXX on x86 unclear if we should cast the values back to float
1283 * or not - on some cpus (nehalem) pblendvb has twice the throughput
1284 * of blendvps though on others there just might be domain
1285 * transition penalties when using it (this depends on what llvm
1286 * will choose for the bit ops above, so there appears to be no "right way",
1287 * but given the boatload of selects let's just use the int type).
1288 *
1289 * Unfortunately we also need the sign bit of the summed coords.
1290 */
1291 *face_s = lp_build_select(cint_bld, arx_ge_ary, snewx, snewy);
1292 *face_t = lp_build_select(cint_bld, arx_ge_ary, tnewx, tnewy);
1293 ma = lp_build_select(coord_bld, arx_ge_ary, s, t);
1294 *face = lp_build_select(cint_bld, arx_ge_ary, facex, facey);
1295 sign = lp_build_select(cint_bld, arx_ge_ary, signrxs, signrys);
1296
1297 *face_s = lp_build_select(cint_bld, arz_ge_arx_ary, *face_s, snewz);
1298 *face_t = lp_build_select(cint_bld, arz_ge_arx_ary, *face_t, tnewz);
1299 ma = lp_build_select(coord_bld, arz_ge_arx_ary, ma, r);
1300 *face = lp_build_select(cint_bld, arz_ge_arx_ary, *face, facez);
1301 sign = lp_build_select(cint_bld, arz_ge_arx_ary, sign, signrzs);
1302
1303 *face_s = LLVMBuildBitCast(builder, *face_s,
1304 lp_build_vec_type(gallivm, coord_bld->type), "");
1305 *face_t = LLVMBuildBitCast(builder, *face_t,
1306 lp_build_vec_type(gallivm, coord_bld->type), "");
1307
1308 /* add +1 for neg face */
1309 /* XXX with AVX probably want to use another select here -
1310 * as long as we ensure vblendvps gets used we can actually
1311 * skip the comparison and just use sign as a "mask" directly.
1312 */
1313 sign = LLVMBuildLShr(builder, sign, signshift, "");
1314 *face = LLVMBuildOr(builder, *face, sign, "face");
1315
1316 ima = lp_build_cube_imapos(coord_bld, ma);
1317
1318 *face_s = lp_build_mul(coord_bld, *face_s, ima);
1319 *face_s = lp_build_add(coord_bld, *face_s, posHalf);
1320 *face_t = lp_build_mul(coord_bld, *face_t, ima);
1321 *face_t = lp_build_add(coord_bld, *face_t, posHalf);
1322 }
1323
1324 else {
1325 struct lp_build_if_state if_ctx;
1326 LLVMValueRef face_s_var;
1327 LLVMValueRef face_t_var;
1328 LLVMValueRef face_var;
1329 LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
1330 LLVMValueRef shuffles[4];
1331 LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
1332 LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
1333 struct lp_build_context *float_bld = &bld->float_bld;
1334
1335 assert(bld->coord_bld.type.length == 4);
1336
1337 shuffles[0] = lp_build_const_int32(gallivm, 0);
1338 shuffles[1] = lp_build_const_int32(gallivm, 1);
1339 shuffles[2] = lp_build_const_int32(gallivm, 0);
1340 shuffles[3] = lp_build_const_int32(gallivm, 1);
1341 arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1342 shuffles[0] = lp_build_const_int32(gallivm, 1);
1343 shuffles[1] = lp_build_const_int32(gallivm, 0);
1344 shuffles[2] = lp_build_const_int32(gallivm, 2);
1345 shuffles[3] = lp_build_const_int32(gallivm, 2);
1346 aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
1347 arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
1348
1349 shuffles[0] = lp_build_const_int32(gallivm, 0);
1350 shuffles[1] = lp_build_const_int32(gallivm, 1);
1351 arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1352 LLVMConstVector(shuffles, 2), "");
1353 shuffles[0] = lp_build_const_int32(gallivm, 2);
1354 shuffles[1] = lp_build_const_int32(gallivm, 3);
1355 arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
1356 LLVMConstVector(shuffles, 2), "");
1357 arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
1358
1359 arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1360 lp_build_const_int32(gallivm, 0), "");
1361 arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
1362 lp_build_const_int32(gallivm, 0), "");
1363 ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
1364 lp_build_const_int32(gallivm, 1), "");
1365 ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
1366 lp_build_const_int32(gallivm, 0), "");
1367 face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
1368 face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
1369 face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
1370
1371 lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
1372 {
1373 /* +/- X face */
1374 LLVMValueRef sign, ima;
1375 rx = LLVMBuildExtractElement(builder, rxyz,
1376 lp_build_const_int32(gallivm, 0), "");
1378 sign = lp_build_sgn(float_bld, rx);
1379 ima = lp_build_cube_imaneg(coord_bld, s);
1380 *face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
1381 *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1382 *face = lp_build_cube_face(bld, rx,
1383 PIPE_TEX_FACE_POS_X,
1384 PIPE_TEX_FACE_NEG_X);
1385 LLVMBuildStore(builder, *face_s, face_s_var);
1386 LLVMBuildStore(builder, *face_t, face_t_var);
1387 LLVMBuildStore(builder, *face, face_var);
1388 }
1389 lp_build_else(&if_ctx);
1390 {
1391 struct lp_build_if_state if_ctx2;
1392
1393 lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
1394 {
1395 LLVMValueRef sign, ima;
1396 /* +/- Y face */
1397 ry = LLVMBuildExtractElement(builder, rxyz,
1398 lp_build_const_int32(gallivm, 1), "");
1399 sign = lp_build_sgn(float_bld, ry);
1400 ima = lp_build_cube_imaneg(coord_bld, t);
1401 *face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
1402 *face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
1403 *face = lp_build_cube_face(bld, ry,
1404 PIPE_TEX_FACE_POS_Y,
1405 PIPE_TEX_FACE_NEG_Y);
1406 LLVMBuildStore(builder, *face_s, face_s_var);
1407 LLVMBuildStore(builder, *face_t, face_t_var);
1408 LLVMBuildStore(builder, *face, face_var);
1409 }
1410 lp_build_else(&if_ctx2);
1411 {
1412 /* +/- Z face */
1413 LLVMValueRef sign, ima;
1414 rz = LLVMBuildExtractElement(builder, rxyz,
1415 lp_build_const_int32(gallivm, 2), "");
1416 sign = lp_build_sgn(float_bld, rz);
1417 ima = lp_build_cube_imaneg(coord_bld, r);
1418 *face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
1419 *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
1420 *face = lp_build_cube_face(bld, rz,
1421 PIPE_TEX_FACE_POS_Z,
1422 PIPE_TEX_FACE_NEG_Z);
1423 LLVMBuildStore(builder, *face_s, face_s_var);
1424 LLVMBuildStore(builder, *face_t, face_t_var);
1425 LLVMBuildStore(builder, *face, face_var);
1426 }
1427 lp_build_endif(&if_ctx2);
1428 }
1429
1430 lp_build_endif(&if_ctx);
1431
1432 *face_s = LLVMBuildLoad(builder, face_s_var, "face_s");
1433 *face_t = LLVMBuildLoad(builder, face_t_var, "face_t");
1434 *face = LLVMBuildLoad(builder, face_var, "face");
1435 *face = lp_build_broadcast_scalar(&bld->int_coord_bld, *face);
1436 }
1437 }
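/*
 * A worked face selection for the function above (illustrative): take
 * per-quad averages (s, t, r) = (0.3, -0.8, 0.2).  |t| has the largest
 * magnitude, so the major axis is Y, and sign(t) < 0 selects
 * PIPE_TEX_FACE_NEG_Y.  The new coords are snew = s = 0.3 and
 * tnew = sign(t) * r = -0.2, and with ima = 0.5/0.8 = 0.625 both paths
 * above produce face_s = 0.3 * 0.625 + 0.5 = 0.6875 and
 * face_t = -0.2 * 0.625 + 0.5 = 0.375, matching the standard GL cube
 * map table for the -Y face (sc = rx, tc = -rz).
 */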
1438
1439
1440 /**
1441 * Compute the partial offset of a pixel block along an arbitrary axis.
1442 *
1443 * @param coord coordinate in pixels
1444 * @param stride number of bytes between rows of successive pixel blocks
1445 * @param block_length number of pixels in a pixels block along the coordinate
1446 * axis
1447 * @param out_offset resulting relative offset of the pixel block in bytes
1448 * @param out_subcoord resulting sub-block pixel coordinate
1449 */
1450 void
1451 lp_build_sample_partial_offset(struct lp_build_context *bld,
1452 unsigned block_length,
1453 LLVMValueRef coord,
1454 LLVMValueRef stride,
1455 LLVMValueRef *out_offset,
1456 LLVMValueRef *out_subcoord)
1457 {
1458 LLVMBuilderRef builder = bld->gallivm->builder;
1459 LLVMValueRef offset;
1460 LLVMValueRef subcoord;
1461
1462 if (block_length == 1) {
1463 subcoord = bld->zero;
1464 }
1465 else {
1466 /*
1467 * Pixel blocks have power of two dimensions. LLVM should convert the
1468 * rem/div to bit arithmetic.
1469 * TODO: Verify this.
1470 * It does indeed, BUT it transforms it to scalar (and back) when doing so
1471 * (using roughly extract, shift/and, mov, unpack) (llvm 2.7).
1472 * The generated code looks seriously unfunny and is quite expensive.
1473 */
1474 #if 0
1475 LLVMValueRef block_width = lp_build_const_int_vec(bld->type, block_length);
1476 subcoord = LLVMBuildURem(builder, coord, block_width, "");
1477 coord = LLVMBuildUDiv(builder, coord, block_width, "");
1478 #else
1479 unsigned logbase2 = util_logbase2(block_length);
1480 LLVMValueRef block_shift = lp_build_const_int_vec(bld->gallivm, bld->type, logbase2);
1481 LLVMValueRef block_mask = lp_build_const_int_vec(bld->gallivm, bld->type, block_length - 1);
1482 subcoord = LLVMBuildAnd(builder, coord, block_mask, "");
1483 coord = LLVMBuildLShr(builder, coord, block_shift, "");
1484 #endif
1485 }
1486
1487 offset = lp_build_mul(bld, coord, stride);
1488
1489 assert(out_offset);
1490 assert(out_subcoord);
1491
1492 *out_offset = offset;
1493 *out_subcoord = subcoord;
1494 }
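/*
 * A worked example of the bit arithmetic above (illustrative): for a
 * compressed format with block_length = 4 along this axis (e.g. the
 * width of a DXT1 block) and coord = 13, we get
 * subcoord = 13 & 3 = 1 and a block index of 13 >> 2 = 3, hence
 * offset = 3 * stride.  For block_length = 1 (all plain formats) this
 * collapses to offset = coord * stride with subcoord = 0.
 */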
1495
1496
1497 /**
1498 * Compute the offset of a pixel block.
1499 *
1500 * x, y, z, y_stride, z_stride are vectors, and they refer to pixels.
1501 *
1502 * Returns the relative offset and i,j sub-block coordinates
1503 */
1504 void
1505 lp_build_sample_offset(struct lp_build_context *bld,
1506 const struct util_format_description *format_desc,
1507 LLVMValueRef x,
1508 LLVMValueRef y,
1509 LLVMValueRef z,
1510 LLVMValueRef y_stride,
1511 LLVMValueRef z_stride,
1512 LLVMValueRef *out_offset,
1513 LLVMValueRef *out_i,
1514 LLVMValueRef *out_j)
1515 {
1516 LLVMValueRef x_stride;
1517 LLVMValueRef offset;
1518
1519 x_stride = lp_build_const_vec(bld->gallivm, bld->type,
1520 format_desc->block.bits/8);
1521
1522 lp_build_sample_partial_offset(bld,
1523 format_desc->block.width,
1524 x, x_stride,
1525 &offset, out_i);
1526
1527 if (y && y_stride) {
1528 LLVMValueRef y_offset;
1529 lp_build_sample_partial_offset(bld,
1530 format_desc->block.height,
1531 y, y_stride,
1532 &y_offset, out_j);
1533 offset = lp_build_add(bld, offset, y_offset);
1534 }
1535 else {
1536 *out_j = bld->zero;
1537 }
1538
1539 if (z && z_stride) {
1540 LLVMValueRef z_offset;
1541 LLVMValueRef k;
1542 lp_build_sample_partial_offset(bld,
1543 1, /* pixel blocks are always 2D */
1544 z, z_stride,
1545 &z_offset, &k);
1546 offset = lp_build_add(bld, offset, z_offset);
1547 }
1548
1549 *out_offset = offset;
1550 }
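/*
 * Putting the pieces together (illustrative): for a plain 32-bit RGBA
 * format (1x1 block, 32 bits) the code above reduces to
 * offset = x * 4 + y * y_stride + z * z_stride with i = j = 0.  For
 * DXT1 (4x4 block, 64 bits) it becomes
 * offset = (x >> 2) * 8 + (y >> 2) * y_stride, with sub-block coords
 * i = x & 3 and j = y & 3, assuming the caller passes y_stride as the
 * stride between rows of blocks.
 */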