diff --git a/src/gallium/auxiliary/gallivm/lp_bld_sample.c b/src/gallium/auxiliary/gallivm/lp_bld_sample.c
index 53223973172..fa145e1c3b2 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_sample.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_sample.c
@@ -34,8 +34,9 @@
 #include "pipe/p_defines.h"
 #include "pipe/p_state.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
 #include "util/u_math.h"
+#include "util/u_cpu_detect.h"
 #include "lp_bld_arit.h"
 #include "lp_bld_const.h"
 #include "lp_bld_debug.h"
@@ -46,6 +47,8 @@
 #include "lp_bld_type.h"
 #include "lp_bld_logic.h"
 #include "lp_bld_pack.h"
+#include "lp_bld_quad.h"
+#include "lp_bld_bitarit.h"
 
 
 /*
@@ -110,17 +113,53 @@ lp_sampler_static_texture_state(struct lp_static_texture_state *state,
    state->swizzle_b = view->swizzle_b;
    state->swizzle_a = view->swizzle_a;
 
-   state->target = texture->target;
-   state->pot_width = util_is_power_of_two(texture->width0);
-   state->pot_height = util_is_power_of_two(texture->height0);
-   state->pot_depth = util_is_power_of_two(texture->depth0);
+   state->target = view->target;
+   state->pot_width = util_is_power_of_two_or_zero(texture->width0);
+   state->pot_height = util_is_power_of_two_or_zero(texture->height0);
+   state->pot_depth = util_is_power_of_two_or_zero(texture->depth0);
    state->level_zero_only = !view->u.tex.last_level;
 
    /*
-    * FIXME: Handle the remainder of pipe_sampler_view.
+    * the layer / element / level parameters are all either dynamic
+    * state or handled transparently wrt execution.
     */
 }

+/**
+ * Initialize lp_sampler_static_texture_state object with the gallium
+ * image view state (this contains the parts which are
+ * considered static).
+ */
+void
+lp_sampler_static_texture_state_image(struct lp_static_texture_state *state,
+                                      const struct pipe_image_view *view)
+{
+   const struct pipe_resource *resource;
+
+   memset(state, 0, sizeof *state);
+
+   if (!view || !view->resource)
+      return;
+
+   resource = view->resource;
+
+   state->format = view->format;
+   state->swizzle_r = PIPE_SWIZZLE_X;
+   state->swizzle_g = PIPE_SWIZZLE_Y;
+   state->swizzle_b = PIPE_SWIZZLE_Z;
+   state->swizzle_a = PIPE_SWIZZLE_W;
+
+   state->target = view->resource->target;
+   state->pot_width = util_is_power_of_two_or_zero(resource->width0);
+   state->pot_height = util_is_power_of_two_or_zero(resource->height0);
+   state->pot_depth = util_is_power_of_two_or_zero(resource->depth0);
+   state->level_zero_only = 0;
+
+   /*
+    * the layer / element / level parameters are all either dynamic
+    * state or handled transparently wrt execution.
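+    * For example (a rough sketch using names from this file): the
+    * generated code only bakes in the format/target/pot flags from
+    * this struct, while sizes and mip levels are fetched at run time
+    * through the lp_sampler_dynamic_state callbacks (first_level()
+    * and friends), so they can change without recompiling.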
+ */ +} /** * Initialize lp_sampler_static_sampler_state object with the gallium sampler @@ -152,17 +191,19 @@ lp_sampler_static_sampler_state(struct lp_static_sampler_state *state, state->wrap_r = sampler->wrap_r; state->min_img_filter = sampler->min_img_filter; state->mag_img_filter = sampler->mag_img_filter; + state->min_mip_filter = sampler->min_mip_filter; + state->seamless_cube_map = sampler->seamless_cube_map; if (sampler->max_lod > 0.0f) { - state->min_mip_filter = sampler->min_mip_filter; - } else { - state->min_mip_filter = PIPE_TEX_MIPFILTER_NONE; + state->max_lod_pos = 1; } - if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) { - if (sampler->lod_bias != 0.0f) { - state->lod_bias_non_zero = 1; - } + if (sampler->lod_bias != 0.0f) { + state->lod_bias_non_zero = 1; + } + + if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE || + state->min_img_filter != state->mag_img_filter) { /* If min_lod == max_lod we can greatly simplify mipmap selection. * This is a case that occurs during automatic mipmap generation. @@ -197,11 +238,15 @@ lp_sampler_static_sampler_state(struct lp_static_sampler_state *state, * Generate code to compute coordinate gradient (rho). * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y * - * The resulting rho is scalar per quad. + * The resulting rho has bld->levelf format (per quad or per element). */ static LLVMValueRef lp_build_rho(struct lp_build_sample_context *bld, unsigned texture_unit, + LLVMValueRef s, + LLVMValueRef t, + LLVMValueRef r, + LLVMValueRef cube_rho, const struct lp_derivatives *derivs) { struct gallivm_state *gallivm = bld->gallivm; @@ -209,9 +254,9 @@ lp_build_rho(struct lp_build_sample_context *bld, struct lp_build_context *float_size_bld = &bld->float_size_in_bld; struct lp_build_context *float_bld = &bld->float_bld; struct lp_build_context *coord_bld = &bld->coord_bld; - struct lp_build_context *perquadf_bld = &bld->perquadf_bld; - const LLVMValueRef *ddx_ddy = derivs->ddx_ddy; + struct lp_build_context *rho_bld = &bld->lodf_bld; const unsigned dims = bld->dims; + LLVMValueRef ddx_ddy[2] = {NULL}; LLVMBuilderRef builder = bld->gallivm->builder; LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context); LLVMValueRef index0 = LLVMConstInt(i32t, 0, 0); @@ -221,143 +266,295 @@ lp_build_rho(struct lp_build_sample_context *bld, LLVMValueRef int_size, float_size; LLVMValueRef rho; LLVMValueRef first_level, first_level_vec; - LLVMValueRef abs_ddx_ddy[2]; unsigned length = coord_bld->type.length; unsigned num_quads = length / 4; + boolean rho_per_quad = rho_bld->type.length != length; + boolean no_rho_opt = bld->no_rho_approx && (dims > 1); unsigned i; LLVMValueRef i32undef = LLVMGetUndef(LLVMInt32TypeInContext(gallivm->context)); LLVMValueRef rho_xvec, rho_yvec; - abs_ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]); - if (dims > 2) { - abs_ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]); - } - else { - abs_ddx_ddy[1] = NULL; + /* Note that all simplified calculations will only work for isotropic filtering */ + + /* + * rho calcs are always per quad except for explicit derivs (excluding + * the messy cube maps for now) when requested. 
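+    * As a sketch of the math below: for a 2D texture of size (w, h),
+    *    rho = max(max(|ds/dx|, |ds/dy|) * w, max(|dt/dx|, |dt/dy|) * h)
+    * while with no_rho_approx the sqrt is skipped and the maximum of
+    * the squared, size-scaled gradient lengths is returned instead.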
+ */ + + first_level = bld->dynamic_state->first_level(bld->dynamic_state, bld->gallivm, + bld->context_ptr, texture_unit); + first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level); + int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec, TRUE); + float_size = lp_build_int_to_float(float_size_bld, int_size); + + if (cube_rho) { + LLVMValueRef cubesize; + LLVMValueRef index0 = lp_build_const_int32(gallivm, 0); + + /* + * Cube map code did already everything except size mul and per-quad extraction. + * Luckily cube maps are always quadratic! + */ + if (rho_per_quad) { + rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, + rho_bld->type, cube_rho, 0); + } + else { + rho = lp_build_swizzle_scalar_aos(coord_bld, cube_rho, 0, 4); + } + /* Could optimize this for single quad just skip the broadcast */ + cubesize = lp_build_extract_broadcast(gallivm, bld->float_size_in_type, + rho_bld->type, float_size, index0); + /* skipping sqrt hence returning rho squared */ + cubesize = lp_build_mul(rho_bld, cubesize, cubesize); + rho = lp_build_mul(rho_bld, cubesize, rho); } + else if (derivs) { + LLVMValueRef ddmax[3] = { NULL }, ddx[3] = { NULL }, ddy[3] = { NULL }; + for (i = 0; i < dims; i++) { + LLVMValueRef floatdim; + LLVMValueRef indexi = lp_build_const_int32(gallivm, i); - if (dims == 1) { - static const unsigned char swizzle1[] = { + floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type, + coord_bld->type, float_size, indexi); + + /* + * note that for rho_per_quad case could reduce math (at some shuffle + * cost), but for now use same code to per-pixel lod case. + */ + if (no_rho_opt) { + ddx[i] = lp_build_mul(coord_bld, floatdim, derivs->ddx[i]); + ddy[i] = lp_build_mul(coord_bld, floatdim, derivs->ddy[i]); + ddx[i] = lp_build_mul(coord_bld, ddx[i], ddx[i]); + ddy[i] = lp_build_mul(coord_bld, ddy[i], ddy[i]); + } + else { + LLVMValueRef tmpx, tmpy; + tmpx = lp_build_abs(coord_bld, derivs->ddx[i]); + tmpy = lp_build_abs(coord_bld, derivs->ddy[i]); + ddmax[i] = lp_build_max(coord_bld, tmpx, tmpy); + ddmax[i] = lp_build_mul(coord_bld, floatdim, ddmax[i]); + } + } + if (no_rho_opt) { + rho_xvec = lp_build_add(coord_bld, ddx[0], ddx[1]); + rho_yvec = lp_build_add(coord_bld, ddy[0], ddy[1]); + if (dims > 2) { + rho_xvec = lp_build_add(coord_bld, rho_xvec, ddx[2]); + rho_yvec = lp_build_add(coord_bld, rho_yvec, ddy[2]); + } + rho = lp_build_max(coord_bld, rho_xvec, rho_yvec); + /* skipping sqrt hence returning rho squared */ + } + else { + rho = ddmax[0]; + if (dims > 1) { + rho = lp_build_max(coord_bld, rho, ddmax[1]); + if (dims > 2) { + rho = lp_build_max(coord_bld, rho, ddmax[2]); + } + } + } + if (rho_per_quad) { + /* + * rho_vec contains per-pixel rho, convert to scalar per quad. + */ + rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, + rho_bld->type, rho, 0); + } + } + else { + /* + * This looks all a bit complex, but it's not that bad + * (the shuffle code makes it look worse than it is). + * Still, might not be ideal for all cases. 
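+       * (As a reminder, the implicit path approximates the derivatives
+       * per quad from neighboring pixels, roughly
+       *    ds/dx ~= s[1] - s[0],  ds/dy ~= s[2] - s[0]
+       * for a quad laid out [0 1 / 2 3]; that is what the packed
+       * lp_build_packed_ddx_ddy_{one,two}coord() helpers below compute.)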
+ */ + static const unsigned char swizzle0[] = { /* no-op swizzle */ 0, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE }; - static const unsigned char swizzle2[] = { - 1, LP_BLD_SWIZZLE_DONTCARE, - LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE - }; - rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1); - rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2); - } - else if (dims == 2) { static const unsigned char swizzle1[] = { - 0, 2, + 1, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE }; static const unsigned char swizzle2[] = { - 1, 3, + 2, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE }; - rho_xvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle1); - rho_yvec = lp_build_swizzle_aos(coord_bld, abs_ddx_ddy[0], swizzle2); - } - else { - LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH]; - LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH]; - assert(dims == 3); - for (i = 0; i < num_quads; i++) { - shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i); - shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2); - shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i); - shuffles1[4*i + 3] = i32undef; - shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1); - shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3); - shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 1); - shuffles2[4*i + 3] = i32undef; - } - rho_xvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1], - LLVMConstVector(shuffles1, length), ""); - rho_yvec = LLVMBuildShuffleVector(builder, abs_ddx_ddy[0], abs_ddx_ddy[1], - LLVMConstVector(shuffles2, length), ""); - } - rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec); + if (dims < 2) { + ddx_ddy[0] = lp_build_packed_ddx_ddy_onecoord(coord_bld, s); + } + else if (dims >= 2) { + ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t); + if (dims > 2) { + ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r); + } + } - first_level = bld->dynamic_state->first_level(bld->dynamic_state, - bld->gallivm, texture_unit); - first_level_vec = lp_build_broadcast_scalar(int_size_bld, first_level); - int_size = lp_build_minify(int_size_bld, bld->int_size, first_level_vec); - float_size = lp_build_int_to_float(float_size_bld, int_size); + if (no_rho_opt) { + static const unsigned char swizzle01[] = { /* no-op swizzle */ + 0, 1, + LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE + }; + static const unsigned char swizzle23[] = { + 2, 3, + LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE + }; + LLVMValueRef ddx_ddys, ddx_ddyt, floatdim, shuffles[LP_MAX_VECTOR_LENGTH / 4]; - if (bld->coord_type.length > 4) { - /* expand size to each quad */ - if (dims > 1) { - /* could use some broadcast_vector helper for this? 
*/ - int num_quads = bld->coord_type.length / 4; - LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4]; for (i = 0; i < num_quads; i++) { - src[i] = float_size; + shuffles[i*4+0] = shuffles[i*4+1] = index0; + shuffles[i*4+2] = shuffles[i*4+3] = index1; + } + floatdim = LLVMBuildShuffleVector(builder, float_size, float_size, + LLVMConstVector(shuffles, length), ""); + ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], floatdim); + ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]); + ddx_ddys = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01); + ddx_ddyt = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23); + rho_vec = lp_build_add(coord_bld, ddx_ddys, ddx_ddyt); + + if (dims > 2) { + static const unsigned char swizzle02[] = { + 0, 2, + LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE + }; + floatdim = lp_build_extract_broadcast(gallivm, bld->float_size_in_type, + coord_bld->type, float_size, index2); + ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], floatdim); + ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]); + ddx_ddy[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02); + rho_vec = lp_build_add(coord_bld, rho_vec, ddx_ddy[1]); } - float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads); - } - else { - float_size = lp_build_broadcast_scalar(coord_bld, float_size); - } - rho_vec = lp_build_mul(coord_bld, rho_vec, float_size); - if (dims <= 1) { - rho = rho_vec; + rho_xvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0); + rho_yvec = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1); + rho = lp_build_max(coord_bld, rho_xvec, rho_yvec); + + if (rho_per_quad) { + rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, + rho_bld->type, rho, 0); + } + else { + rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4); + } + /* skipping sqrt hence returning rho squared */ } else { - if (dims >= 2) { - static const unsigned char swizzle1[] = { - 0, LP_BLD_SWIZZLE_DONTCARE, + ddx_ddy[0] = lp_build_abs(coord_bld, ddx_ddy[0]); + if (dims > 2) { + ddx_ddy[1] = lp_build_abs(coord_bld, ddx_ddy[1]); + } + else { + ddx_ddy[1] = NULL; /* silence compiler warning */ + } + + if (dims < 2) { + rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle0); + rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle2); + } + else if (dims == 2) { + static const unsigned char swizzle02[] = { + 0, 2, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE }; - static const unsigned char swizzle2[] = { - 1, LP_BLD_SWIZZLE_DONTCARE, + static const unsigned char swizzle13[] = { + 1, 3, LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE }; - LLVMValueRef rho_s, rho_t, rho_r; + rho_xvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle02); + rho_yvec = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle13); + } + else { + LLVMValueRef shuffles1[LP_MAX_VECTOR_LENGTH]; + LLVMValueRef shuffles2[LP_MAX_VECTOR_LENGTH]; + assert(dims == 3); + for (i = 0; i < num_quads; i++) { + shuffles1[4*i + 0] = lp_build_const_int32(gallivm, 4*i); + shuffles1[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 2); + shuffles1[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i); + shuffles1[4*i + 3] = i32undef; + shuffles2[4*i + 0] = lp_build_const_int32(gallivm, 4*i + 1); + shuffles2[4*i + 1] = lp_build_const_int32(gallivm, 4*i + 3); + shuffles2[4*i + 2] = lp_build_const_int32(gallivm, length + 4*i + 2); + shuffles2[4*i + 3] = i32undef; + } + rho_xvec = LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1], + LLVMConstVector(shuffles1, length), ""); + rho_yvec = 
LLVMBuildShuffleVector(builder, ddx_ddy[0], ddx_ddy[1], + LLVMConstVector(shuffles2, length), ""); + } + + rho_vec = lp_build_max(coord_bld, rho_xvec, rho_yvec); + + if (bld->coord_type.length > 4) { + /* expand size to each quad */ + if (dims > 1) { + /* could use some broadcast_vector helper for this? */ + LLVMValueRef src[LP_MAX_VECTOR_LENGTH/4]; + for (i = 0; i < num_quads; i++) { + src[i] = float_size; + } + float_size = lp_build_concat(bld->gallivm, src, float_size_bld->type, num_quads); + } + else { + float_size = lp_build_broadcast_scalar(coord_bld, float_size); + } + rho_vec = lp_build_mul(coord_bld, rho_vec, float_size); + + if (dims <= 1) { + rho = rho_vec; + } + else { + if (dims >= 2) { + LLVMValueRef rho_s, rho_t, rho_r; - rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1); - rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2); + rho_s = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0); + rho_t = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1); - rho = lp_build_max(coord_bld, rho_s, rho_t); + rho = lp_build_max(coord_bld, rho_s, rho_t); - if (dims >= 3) { - static const unsigned char swizzle3[] = { - 2, LP_BLD_SWIZZLE_DONTCARE, - LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE - }; - rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle3); - rho = lp_build_max(coord_bld, rho, rho_r); + if (dims >= 3) { + rho_r = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle2); + rho = lp_build_max(coord_bld, rho, rho_r); + } + } + } + if (rho_per_quad) { + rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, + rho_bld->type, rho, 0); + } + else { + rho = lp_build_swizzle_scalar_aos(coord_bld, rho, 0, 4); } } - } - rho = lp_build_pack_aos_scalars(bld->gallivm, coord_bld->type, - perquadf_bld->type, rho, 0); - } - else { - if (dims <= 1) { - rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, ""); - } - rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size); + else { + if (dims <= 1) { + rho_vec = LLVMBuildExtractElement(builder, rho_vec, index0, ""); + } + rho_vec = lp_build_mul(float_size_bld, rho_vec, float_size); - if (dims <= 1) { - rho = rho_vec; - } - else { - if (dims >= 2) { - LLVMValueRef rho_s, rho_t, rho_r; + if (dims <= 1) { + rho = rho_vec; + } + else { + if (dims >= 2) { + LLVMValueRef rho_s, rho_t, rho_r; - rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, ""); - rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, ""); + rho_s = LLVMBuildExtractElement(builder, rho_vec, index0, ""); + rho_t = LLVMBuildExtractElement(builder, rho_vec, index1, ""); - rho = lp_build_max(float_bld, rho_s, rho_t); + rho = lp_build_max(float_bld, rho_s, rho_t); - if (dims >= 3) { - rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, ""); - rho = lp_build_max(float_bld, rho, rho_r); + if (dims >= 3) { + rho_r = LLVMBuildExtractElement(builder, rho_vec, index2, ""); + rho = lp_build_max(float_bld, rho, rho_r); + } + } + } + if (!rho_per_quad) { + rho = lp_build_broadcast_scalar(rho_bld, rho); } } } @@ -418,10 +615,8 @@ lp_build_brilinear_lod(struct lp_build_context *bld, lp_build_ifloor_fract(bld, lod, out_lod_ipart, &lod_fpart); - lod_fpart = lp_build_mul(bld, lod_fpart, - lp_build_const_vec(bld->gallivm, bld->type, factor)); - - lod_fpart = lp_build_add(bld, lod_fpart, + lod_fpart = lp_build_mad(bld, lod_fpart, + lp_build_const_vec(bld->gallivm, bld->type, factor), lp_build_const_vec(bld->gallivm, bld->type, post_offset)); /* @@ -465,7 +660,7 @@ lp_build_brilinear_rho(struct lp_build_context *bld, /* * The pre factor will make the 
intersections with the exact powers of two - * happen precisely where we want then to be, which means that the integer + * happen precisely where we want them to be, which means that the integer * part will not need any post adjustments. */ rho = lp_build_mul(bld, rho, @@ -477,10 +672,8 @@ lp_build_brilinear_rho(struct lp_build_context *bld, /* fpart = rho / 2**ipart */ lod_fpart = lp_build_extract_mantissa(bld, rho); - lod_fpart = lp_build_mul(bld, lod_fpart, - lp_build_const_vec(bld->gallivm, bld->type, factor)); - - lod_fpart = lp_build_add(bld, lod_fpart, + lod_fpart = lp_build_mad(bld, lod_fpart, + lp_build_const_vec(bld->gallivm, bld->type, factor), lp_build_const_vec(bld->gallivm, bld->type, post_offset)); /* @@ -494,62 +687,122 @@ lp_build_brilinear_rho(struct lp_build_context *bld, } +/** + * Fast implementation of iround(log2(sqrt(x))), based on + * log2(x^n) == n*log2(x). + * + * Gives accurate results all the time. + * (Could be trivially extended to handle other power-of-two roots.) + */ +static LLVMValueRef +lp_build_ilog2_sqrt(struct lp_build_context *bld, + LLVMValueRef x) +{ + LLVMBuilderRef builder = bld->gallivm->builder; + LLVMValueRef ipart; + struct lp_type i_type = lp_int_type(bld->type); + LLVMValueRef one = lp_build_const_int_vec(bld->gallivm, i_type, 1); + + assert(bld->type.floating); + + assert(lp_check_value(bld->type, x)); + + /* ipart = log2(x) + 0.5 = 0.5*(log2(x^2) + 1.0) */ + ipart = lp_build_extract_exponent(bld, x, 1); + ipart = LLVMBuildAShr(builder, ipart, one, ""); + + return ipart; +} + + /** * Generate code to compute texture level of detail (lambda). * \param derivs partial derivatives of (s, t, r, q) with respect to X and Y * \param lod_bias optional float vector with the shader lod bias * \param explicit_lod optional float vector with the explicit lod - * \param width scalar int texture width - * \param height scalar int texture height - * \param depth scalar int texture depth + * \param cube_rho rho calculated by cube coord mapping (optional) + * \param out_lod_ipart integer part of lod + * \param out_lod_fpart float part of lod (never larger than 1 but may be negative) + * \param out_lod_positive (mask) if lod is positive (i.e. texture is minified) * - * The resulting lod is scalar per quad, so only the first value per quad - * passed in from lod_bias, explicit_lod is used. + * The resulting lod can be scalar per quad or be per element. 
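+ * As a sketch of the math below: lod = log2(rho) + shader/sampler lod
+ * bias, clamped to [min_lod, max_lod]; when rho comes in squared
+ * (no_rho_approx or cube maps) lod = 0.5 * log2(rho^2) is used instead.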
*/ void lp_build_lod_selector(struct lp_build_sample_context *bld, + boolean is_lodq, unsigned texture_unit, unsigned sampler_unit, + LLVMValueRef s, + LLVMValueRef t, + LLVMValueRef r, + LLVMValueRef cube_rho, const struct lp_derivatives *derivs, LLVMValueRef lod_bias, /* optional */ LLVMValueRef explicit_lod, /* optional */ unsigned mip_filter, + LLVMValueRef *out_lod, LLVMValueRef *out_lod_ipart, - LLVMValueRef *out_lod_fpart) + LLVMValueRef *out_lod_fpart, + LLVMValueRef *out_lod_positive) { LLVMBuilderRef builder = bld->gallivm->builder; - struct lp_build_context *perquadf_bld = &bld->perquadf_bld; + struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state; + struct lp_build_context *lodf_bld = &bld->lodf_bld; LLVMValueRef lod; - *out_lod_ipart = bld->perquadi_bld.zero; - *out_lod_fpart = perquadf_bld->zero; + *out_lod_ipart = bld->lodi_bld.zero; + *out_lod_positive = bld->lodi_bld.zero; + *out_lod_fpart = lodf_bld->zero; + + /* + * For determining min/mag, we follow GL 4.1 spec, 3.9.12 Texture Magnification: + * "Implementations may either unconditionally assume c = 0 for the minification + * vs. magnification switch-over point, or may choose to make c depend on the + * combination of minification and magnification modes as follows: if the + * magnification filter is given by LINEAR and the minification filter is given + * by NEAREST_MIPMAP_NEAREST or NEAREST_MIPMAP_LINEAR, then c = 0.5. This is + * done to ensure that a minified texture does not appear "sharper" than a + * magnified texture. Otherwise c = 0." + * And 3.9.11 Texture Minification: + * "If lod is less than or equal to the constant c (see section 3.9.12) the + * texture is said to be magnified; if it is greater, the texture is minified." + * So, using 0 as switchover point always, and using magnification for lod == 0. + * Note that the always c = 0 behavior is new (first appearing in GL 3.1 spec), + * old GL versions required 0.5 for the modes listed above. + * I have no clue about the (undocumented) wishes of d3d9/d3d10 here! + */ - if (bld->static_sampler_state->min_max_lod_equal) { + if (bld->static_sampler_state->min_max_lod_equal && !is_lodq) { /* User is forcing sampling from a particular mipmap level. * This is hit during mipmap generation. 
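       * (E.g. generating mipmap level n typically means sampling level
       * n-1 with min_lod == max_lod == n-1, so lod can simply be the
       * broadcast min_lod with no rho/log2 math at all.)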
*/ LLVMValueRef min_lod = - bld->dynamic_state->min_lod(bld->dynamic_state, - bld->gallivm, sampler_unit); + dynamic_state->min_lod(dynamic_state, bld->gallivm, + bld->context_ptr, sampler_unit); - lod = lp_build_broadcast_scalar(perquadf_bld, min_lod); + lod = lp_build_broadcast_scalar(lodf_bld, min_lod); } else { if (explicit_lod) { - lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, - perquadf_bld->type, explicit_lod, 0); + if (bld->num_lods != bld->coord_type.length) + lod = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, + lodf_bld->type, explicit_lod, 0); + else + lod = explicit_lod; } else { LLVMValueRef rho; + boolean rho_squared = (bld->no_rho_approx && + (bld->dims > 1)) || cube_rho; - rho = lp_build_rho(bld, texture_unit, derivs); + rho = lp_build_rho(bld, texture_unit, s, t, r, cube_rho, derivs); /* * Compute lod = log2(rho) */ - if (!lod_bias && + if (!lod_bias && !is_lodq && !bld->static_sampler_state->lod_bias_non_zero && !bld->static_sampler_state->apply_max_lod && !bld->static_sampler_state->apply_min_lod) { @@ -561,29 +814,53 @@ lp_build_lod_selector(struct lp_build_sample_context *bld, if (mip_filter == PIPE_TEX_MIPFILTER_NONE || mip_filter == PIPE_TEX_MIPFILTER_NEAREST) { - *out_lod_ipart = lp_build_ilog2(perquadf_bld, rho); - *out_lod_fpart = perquadf_bld->zero; + /* + * Don't actually need both values all the time, lod_ipart is + * needed for nearest mipfilter, lod_positive if min != mag. + */ + if (rho_squared) { + *out_lod_ipart = lp_build_ilog2_sqrt(lodf_bld, rho); + } + else { + *out_lod_ipart = lp_build_ilog2(lodf_bld, rho); + } + *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER, + rho, lodf_bld->one); return; } if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR && - !(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { - lp_build_brilinear_rho(perquadf_bld, rho, BRILINEAR_FACTOR, + !bld->no_brilinear && !rho_squared) { + /* + * This can't work if rho is squared. Not sure if it could be + * fixed while keeping it worthwile, could also do sqrt here + * but brilinear and no_rho_opt seems like a combination not + * making much sense anyway so just use ordinary path below. 
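+          * (lp_build_brilinear_rho() operates on rho's exponent and
+          * mantissa directly, and log2(rho^2) == 2 * log2(rho), so a
+          * squared rho would come out as twice the intended lod.)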
+ */ + lp_build_brilinear_rho(lodf_bld, rho, BRILINEAR_FACTOR, out_lod_ipart, out_lod_fpart); + *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER, + rho, lodf_bld->one); return; } } if (0) { - lod = lp_build_log2(perquadf_bld, rho); + lod = lp_build_log2(lodf_bld, rho); } else { - lod = lp_build_fast_log2(perquadf_bld, rho); + lod = lp_build_fast_log2(lodf_bld, rho); + } + if (rho_squared) { + /* log2(x^2) == 0.5*log2(x) */ + lod = lp_build_mul(lodf_bld, lod, + lp_build_const_vec(bld->gallivm, lodf_bld->type, 0.5F)); } /* add shader lod bias */ if (lod_bias) { - lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, - perquadf_bld->type, lod_bias, 0); + if (bld->num_lods != bld->coord_type.length) + lod_bias = lp_build_pack_aos_scalars(bld->gallivm, bld->coord_bld.type, + lodf_bld->type, lod_bias, 0); lod = LLVMBuildFAdd(builder, lod, lod_bias, "shader_lod_bias"); } } @@ -591,45 +868,57 @@ lp_build_lod_selector(struct lp_build_sample_context *bld, /* add sampler lod bias */ if (bld->static_sampler_state->lod_bias_non_zero) { LLVMValueRef sampler_lod_bias = - bld->dynamic_state->lod_bias(bld->dynamic_state, - bld->gallivm, sampler_unit); - sampler_lod_bias = lp_build_broadcast_scalar(perquadf_bld, + dynamic_state->lod_bias(dynamic_state, bld->gallivm, + bld->context_ptr, sampler_unit); + sampler_lod_bias = lp_build_broadcast_scalar(lodf_bld, sampler_lod_bias); lod = LLVMBuildFAdd(builder, lod, sampler_lod_bias, "sampler_lod_bias"); } + if (is_lodq) { + *out_lod = lod; + } + /* clamp lod */ if (bld->static_sampler_state->apply_max_lod) { LLVMValueRef max_lod = - bld->dynamic_state->max_lod(bld->dynamic_state, - bld->gallivm, sampler_unit); - max_lod = lp_build_broadcast_scalar(perquadf_bld, max_lod); + dynamic_state->max_lod(dynamic_state, bld->gallivm, + bld->context_ptr, sampler_unit); + max_lod = lp_build_broadcast_scalar(lodf_bld, max_lod); - lod = lp_build_min(perquadf_bld, lod, max_lod); + lod = lp_build_min(lodf_bld, lod, max_lod); } if (bld->static_sampler_state->apply_min_lod) { LLVMValueRef min_lod = - bld->dynamic_state->min_lod(bld->dynamic_state, - bld->gallivm, sampler_unit); - min_lod = lp_build_broadcast_scalar(perquadf_bld, min_lod); + dynamic_state->min_lod(dynamic_state, bld->gallivm, + bld->context_ptr, sampler_unit); + min_lod = lp_build_broadcast_scalar(lodf_bld, min_lod); - lod = lp_build_max(perquadf_bld, lod, min_lod); + lod = lp_build_max(lodf_bld, lod, min_lod); + } + + if (is_lodq) { + *out_lod_fpart = lod; + return; } } + *out_lod_positive = lp_build_cmp(lodf_bld, PIPE_FUNC_GREATER, + lod, lodf_bld->zero); + if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { - if (!(gallivm_debug & GALLIVM_DEBUG_NO_BRILINEAR)) { - lp_build_brilinear_lod(perquadf_bld, lod, BRILINEAR_FACTOR, + if (!bld->no_brilinear) { + lp_build_brilinear_lod(lodf_bld, lod, BRILINEAR_FACTOR, out_lod_ipart, out_lod_fpart); } else { - lp_build_ifloor_fract(perquadf_bld, lod, out_lod_ipart, out_lod_fpart); + lp_build_ifloor_fract(lodf_bld, lod, out_lod_ipart, out_lod_fpart); } lp_build_name(*out_lod_fpart, "lod_fpart"); } else { - *out_lod_ipart = lp_build_iround(perquadf_bld, lod); + *out_lod_ipart = lp_build_iround(lodf_bld, lod); } lp_build_name(*out_lod_ipart, "lod_ipart"); @@ -639,38 +928,66 @@ lp_build_lod_selector(struct lp_build_sample_context *bld, /** - * For PIPE_TEX_MIPFILTER_NEAREST, convert float LOD to integer - * mipmap level index. + * For PIPE_TEX_MIPFILTER_NEAREST, convert int part of lod + * to actual mip level. * Note: this is all scalar per quad code. 
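 * (I.e. level = first_level + lod_ipart, which is then either clamped
 * to [first_level, last_level] or, when an out_of_bounds mask is
 * requested, left unclamped with out-of-range lanes masked to zero.)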
* \param lod_ipart int texture level of detail - * \param level_out returns integer + * \param level_out returns integer + * \param out_of_bounds returns per coord out_of_bounds mask if provided */ void lp_build_nearest_mip_level(struct lp_build_sample_context *bld, unsigned texture_unit, LLVMValueRef lod_ipart, - LLVMValueRef *level_out) + LLVMValueRef *level_out, + LLVMValueRef *out_of_bounds) { - struct lp_build_context *perquadi_bld = &bld->perquadi_bld; + struct lp_build_context *leveli_bld = &bld->leveli_bld; + struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state; LLVMValueRef first_level, last_level, level; - first_level = bld->dynamic_state->first_level(bld->dynamic_state, - bld->gallivm, texture_unit); - last_level = bld->dynamic_state->last_level(bld->dynamic_state, - bld->gallivm, texture_unit); - first_level = lp_build_broadcast_scalar(perquadi_bld, first_level); - last_level = lp_build_broadcast_scalar(perquadi_bld, last_level); - - level = lp_build_add(perquadi_bld, lod_ipart, first_level); + first_level = dynamic_state->first_level(dynamic_state, bld->gallivm, + bld->context_ptr, texture_unit); + last_level = dynamic_state->last_level(dynamic_state, bld->gallivm, + bld->context_ptr, texture_unit); + first_level = lp_build_broadcast_scalar(leveli_bld, first_level); + last_level = lp_build_broadcast_scalar(leveli_bld, last_level); + + level = lp_build_add(leveli_bld, lod_ipart, first_level); + + if (out_of_bounds) { + LLVMValueRef out, out1; + out = lp_build_cmp(leveli_bld, PIPE_FUNC_LESS, level, first_level); + out1 = lp_build_cmp(leveli_bld, PIPE_FUNC_GREATER, level, last_level); + out = lp_build_or(leveli_bld, out, out1); + if (bld->num_mips == bld->coord_bld.type.length) { + *out_of_bounds = out; + } + else if (bld->num_mips == 1) { + *out_of_bounds = lp_build_broadcast_scalar(&bld->int_coord_bld, out); + } + else { + assert(bld->num_mips == bld->coord_bld.type.length / 4); + *out_of_bounds = lp_build_unpack_broadcast_aos_scalars(bld->gallivm, + leveli_bld->type, + bld->int_coord_bld.type, + out); + } + level = lp_build_andnot(&bld->int_coord_bld, level, *out_of_bounds); + *level_out = level; + } + else { + /* clamp level to legal range of levels */ + *level_out = lp_build_clamp(leveli_bld, level, first_level, last_level); - /* clamp level to legal range of levels */ - *level_out = lp_build_clamp(perquadi_bld, level, first_level, last_level); + } } /** - * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad int LOD(s) to two (per-quad) - * (adjacent) mipmap level indexes, and fix up float lod part accordingly. + * For PIPE_TEX_MIPFILTER_LINEAR, convert per-quad (or per element) int LOD(s) + * to two (per-quad) (adjacent) mipmap level indexes, and fix up float lod + * part accordingly. * Later, we'll sample from those two mipmap levels and interpolate between them. 
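 * (I.e. level0 = first_level + lod_ipart and level1 = level0 + 1; when
 * a level is clamped at either end the lod fraction is forced to zero,
 * degenerating the blend to a single mipmap level.)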
*/ void @@ -682,21 +999,24 @@ lp_build_linear_mip_levels(struct lp_build_sample_context *bld, LLVMValueRef *level1_out) { LLVMBuilderRef builder = bld->gallivm->builder; - struct lp_build_context *perquadi_bld = &bld->perquadi_bld; - struct lp_build_context *perquadf_bld = &bld->perquadf_bld; + struct lp_sampler_dynamic_state *dynamic_state = bld->dynamic_state; + struct lp_build_context *leveli_bld = &bld->leveli_bld; + struct lp_build_context *levelf_bld = &bld->levelf_bld; LLVMValueRef first_level, last_level; LLVMValueRef clamp_min; LLVMValueRef clamp_max; - first_level = bld->dynamic_state->first_level(bld->dynamic_state, - bld->gallivm, texture_unit); - last_level = bld->dynamic_state->last_level(bld->dynamic_state, - bld->gallivm, texture_unit); - first_level = lp_build_broadcast_scalar(perquadi_bld, first_level); - last_level = lp_build_broadcast_scalar(perquadi_bld, last_level); + assert(bld->num_lods == bld->num_mips); - *level0_out = lp_build_add(perquadi_bld, lod_ipart, first_level); - *level1_out = lp_build_add(perquadi_bld, *level0_out, perquadi_bld->one); + first_level = dynamic_state->first_level(dynamic_state, bld->gallivm, + bld->context_ptr, texture_unit); + last_level = dynamic_state->last_level(dynamic_state, bld->gallivm, + bld->context_ptr, texture_unit); + first_level = lp_build_broadcast_scalar(leveli_bld, first_level); + last_level = lp_build_broadcast_scalar(leveli_bld, last_level); + + *level0_out = lp_build_add(leveli_bld, lod_ipart, first_level); + *level1_out = lp_build_add(leveli_bld, *level0_out, leveli_bld->one); /* * Clamp both *level0_out and *level1_out to [first_level, last_level], with @@ -704,15 +1024,6 @@ lp_build_linear_mip_levels(struct lp_build_sample_context *bld, * ends in the process. */ - /* - * This code (vector select in particular) only works with llvm 3.1 - * (if there's more than one quad, with x86 backend). Might consider - * converting to our lp_bld_logic helpers. 
- */ -#if HAVE_LLVM < 0x0301 - assert(perquadi_bld->type.length == 1); -#endif - /* *level0_out < first_level */ clamp_min = LLVMBuildICmp(builder, LLVMIntSLT, *level0_out, first_level, @@ -725,7 +1036,7 @@ lp_build_linear_mip_levels(struct lp_build_sample_context *bld, first_level, *level1_out, ""); *lod_fpart_inout = LLVMBuildSelect(builder, clamp_min, - perquadf_bld->zero, *lod_fpart_inout, ""); + levelf_bld->zero, *lod_fpart_inout, ""); /* *level0_out >= last_level */ clamp_max = LLVMBuildICmp(builder, LLVMIntSGE, @@ -739,7 +1050,7 @@ lp_build_linear_mip_levels(struct lp_build_sample_context *bld, last_level, *level1_out, ""); *lod_fpart_inout = LLVMBuildSelect(builder, clamp_max, - perquadf_bld->zero, *lod_fpart_inout, ""); + levelf_bld->zero, *lod_fpart_inout, ""); lp_build_name(*level0_out, "texture%u_miplevel0", texture_unit); lp_build_name(*level1_out, "texture%u_miplevel1", texture_unit); @@ -778,17 +1089,17 @@ lp_build_get_mip_offsets(struct lp_build_sample_context *bld, LLVMValueRef indexes[2], offsets, offset1; indexes[0] = lp_build_const_int32(bld->gallivm, 0); - if (bld->num_lods == 1) { + if (bld->num_mips == 1) { indexes[1] = level; offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, ""); offset1 = LLVMBuildLoad(builder, offset1, ""); offsets = lp_build_broadcast_scalar(&bld->int_coord_bld, offset1); } - else if (bld->num_lods == bld->coord_bld.type.length / 4) { + else if (bld->num_mips == bld->coord_bld.type.length / 4) { unsigned i; offsets = bld->int_coord_bld.undef; - for (i = 0; i < bld->num_lods; i++) { + for (i = 0; i < bld->num_mips; i++) { LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i); LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i); indexes[1] = LLVMBuildExtractElement(builder, level, indexi, ""); @@ -801,10 +1112,10 @@ lp_build_get_mip_offsets(struct lp_build_sample_context *bld, else { unsigned i; - assert (bld->num_lods == bld->coord_bld.type.length); + assert (bld->num_mips == bld->coord_bld.type.length); offsets = bld->int_coord_bld.undef; - for (i = 0; i < bld->num_lods; i++) { + for (i = 0; i < bld->num_mips; i++) { LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i); indexes[1] = LLVMBuildExtractElement(builder, level, indexi, ""); offset1 = LLVMBuildGEP(builder, bld->mip_offsets, indexes, 2, ""); @@ -818,12 +1129,14 @@ lp_build_get_mip_offsets(struct lp_build_sample_context *bld, /** * Codegen equivalent for u_minify(). + * @param lod_scalar if lod is a (broadcasted) scalar * Return max(1, base_size >> level); */ LLVMValueRef lp_build_minify(struct lp_build_context *bld, LLVMValueRef base_size, - LLVMValueRef level) + LLVMValueRef level, + boolean lod_scalar) { LLVMBuilderRef builder = bld->gallivm->builder; assert(lp_check_value(bld->type, base_size)); @@ -834,10 +1147,49 @@ lp_build_minify(struct lp_build_context *bld, return base_size; } else { - LLVMValueRef size = - LLVMBuildLShr(builder, base_size, level, "minify"); + LLVMValueRef size; assert(bld->type.sign); - size = lp_build_max(bld, size, bld->one); + if (lod_scalar || + (util_cpu_caps.has_avx2 || !util_cpu_caps.has_sse)) { + size = LLVMBuildLShr(builder, base_size, level, "minify"); + size = lp_build_max(bld, size, bld->one); + } + else { + /* + * emulate shift with float mul, since intel "forgot" shifts with + * per-element shift count until avx2, which results in terrible + * scalar extraction (both count and value), scalar shift, + * vector reinsertion. Should not be an issue on any non-x86 cpu + * with a vector instruction set. 
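+          * (Worked example: level 2 yields (127 - 2) << 23, which
+          * reinterpreted as a float is 2^-2 = 0.25, so the multiply
+          * below is an exact shift-right-by-two for any size < 2^24.)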
+ * On cpus with AMD's XOP this should also be unnecessary but I'm + * not sure if llvm would emit this with current flags. + */ + LLVMValueRef const127, const23, lf; + struct lp_type ftype; + struct lp_build_context fbld; + ftype = lp_type_float_vec(32, bld->type.length * bld->type.width); + lp_build_context_init(&fbld, bld->gallivm, ftype); + const127 = lp_build_const_int_vec(bld->gallivm, bld->type, 127); + const23 = lp_build_const_int_vec(bld->gallivm, bld->type, 23); + + /* calculate 2^(-level) float */ + lf = lp_build_sub(bld, const127, level); + lf = lp_build_shl(bld, lf, const23); + lf = LLVMBuildBitCast(builder, lf, fbld.vec_type, ""); + + /* finish shift operation by doing float mul */ + base_size = lp_build_int_to_float(&fbld, base_size); + size = lp_build_mul(&fbld, base_size, lf); + /* + * do the max also with floats because + * a) non-emulated int max requires sse41 + * (this is actually a lie as we could cast to 16bit values + * as 16bit is sufficient and 16bit int max is sse2) + * b) with avx we can do int max 4-wide but float max 8-wide + */ + size = lp_build_max(&fbld, size, fbld.one); + size = lp_build_itrunc(&fbld, size); + } return size; } } @@ -854,20 +1206,20 @@ lp_build_get_level_stride_vec(struct lp_build_sample_context *bld, LLVMBuilderRef builder = bld->gallivm->builder; LLVMValueRef indexes[2], stride, stride1; indexes[0] = lp_build_const_int32(bld->gallivm, 0); - if (bld->num_lods == 1) { + if (bld->num_mips == 1) { indexes[1] = level; stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, ""); stride1 = LLVMBuildLoad(builder, stride1, ""); stride = lp_build_broadcast_scalar(&bld->int_coord_bld, stride1); } - else if (bld->num_lods == bld->coord_bld.type.length / 4) { + else if (bld->num_mips == bld->coord_bld.type.length / 4) { LLVMValueRef stride1; unsigned i; stride = bld->int_coord_bld.undef; - for (i = 0; i < bld->num_lods; i++) { + for (i = 0; i < bld->num_mips; i++) { LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i); - LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, i); + LLVMValueRef indexo = lp_build_const_int32(bld->gallivm, 4 * i); indexes[1] = LLVMBuildExtractElement(builder, level, indexi, ""); stride1 = LLVMBuildGEP(builder, stride_array, indexes, 2, ""); stride1 = LLVMBuildLoad(builder, stride1, ""); @@ -879,7 +1231,7 @@ lp_build_get_level_stride_vec(struct lp_build_sample_context *bld, LLVMValueRef stride1; unsigned i; - assert (bld->num_lods == bld->coord_bld.type.length); + assert (bld->num_mips == bld->coord_bld.type.length); stride = bld->int_coord_bld.undef; for (i = 0; i < bld->coord_bld.type.length; i++) { @@ -912,9 +1264,9 @@ lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, /* * Compute width, height, depth at mipmap level 'ilevel' */ - if (bld->num_lods == 1) { + if (bld->num_mips == 1) { ilevel_vec = lp_build_broadcast_scalar(&bld->int_size_bld, ilevel); - *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec); + *out_size = lp_build_minify(&bld->int_size_bld, bld->int_size, ilevel_vec, TRUE); } else { LLVMValueRef int_size_vec; @@ -922,7 +1274,7 @@ lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, unsigned num_quads = bld->coord_bld.type.length / 4; unsigned i; - if (bld->num_lods == num_quads) { + if (bld->num_mips == num_quads) { /* * XXX: this should be #ifndef SANE_INSTRUCTION_SET. * intel "forgot" the variable shift count instruction until avx2. 
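          * (Hence the loop below extracts each quad's level, broadcasts
          * it, and minifies one 4-wide sub-vector at a time with a
          * scalar shift count instead of a variable-count vector shift.)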
@@ -954,11 +1306,11 @@ lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i); ileveli = lp_build_extract_broadcast(bld->gallivm, - bld->perquadi_bld.type, + bld->leveli_bld.type, bld4.type, ilevel, indexi); - tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli); + tmp[i] = lp_build_minify(&bld4, int_size_vec, ileveli, TRUE); } /* * out_size is [w0, h0, d0, _, w1, h1, d1, _, ...] vector for dims > 1, @@ -977,31 +1329,28 @@ lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, * with 4-wide vector pack all elements into a 8xi16 vector * (on which we can still do useful math) instead of using a 16xi32 * vector. - * FIXME: some callers can't handle this yet. * For dims == 1 this will create [w0, w1, w2, w3, ...] vector. * For dims > 1 this will create [w0, h0, d0, _, w1, h1, d1, _, ...] vector. */ - assert(bld->num_lods == bld->coord_bld.type.length); + assert(bld->num_mips == bld->coord_bld.type.length); if (bld->dims == 1) { - assert(bld->int_size_bld.type.length == 1); + assert(bld->int_size_in_bld.type.length == 1); int_size_vec = lp_build_broadcast_scalar(&bld->int_coord_bld, bld->int_size); - /* vector shift with variable shift count alert... */ - *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel); + *out_size = lp_build_minify(&bld->int_coord_bld, int_size_vec, ilevel, FALSE); } else { LLVMValueRef ilevel1; - for (i = 0; i < bld->num_lods; i++) { + for (i = 0; i < bld->num_mips; i++) { LLVMValueRef indexi = lp_build_const_int32(bld->gallivm, i); ilevel1 = lp_build_extract_broadcast(bld->gallivm, bld->int_coord_type, bld->int_size_in_bld.type, ilevel, indexi); tmp[i] = bld->int_size; - tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1); + tmp[i] = lp_build_minify(&bld->int_size_in_bld, tmp[i], ilevel1, TRUE); } - int_size_vec = lp_build_concat(bld->gallivm, - tmp, - bld->int_size_in_bld.type, - bld->num_lods); + *out_size = lp_build_concat(bld->gallivm, tmp, + bld->int_size_in_bld.type, + bld->num_mips); } } } @@ -1011,10 +1360,7 @@ lp_build_mipmap_level_sizes(struct lp_build_sample_context *bld, bld->row_stride_array, ilevel); } - if (dims == 3 || - bld->static_texture_state->target == PIPE_TEXTURE_CUBE || - bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY || - bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) { + if (dims == 3 || has_layer_coord(bld->static_texture_state->target)) { *img_stride_vec = lp_build_get_level_stride_vec(bld, bld->img_stride_array, ilevel); @@ -1044,7 +1390,7 @@ lp_build_extract_image_sizes(struct lp_build_sample_context *bld, LLVMTypeRef i32t = LLVMInt32TypeInContext(bld->gallivm->context); struct lp_type size_type = size_bld->type; - if (bld->num_lods == 1) { + if (bld->num_mips == 1) { *out_width = lp_build_extract_broadcast(bld->gallivm, size_type, coord_type, @@ -1071,7 +1417,7 @@ lp_build_extract_image_sizes(struct lp_build_sample_context *bld, if (dims == 1) { *out_width = size; } - else if (bld->num_lods == num_quads) { + else if (bld->num_mips == num_quads) { *out_width = lp_build_swizzle_scalar_aos(size_bld, size, 0, 4); if (dims >= 2) { *out_height = lp_build_swizzle_scalar_aos(size_bld, size, 1, 4); @@ -1081,14 +1427,14 @@ lp_build_extract_image_sizes(struct lp_build_sample_context *bld, } } else { - assert(bld->num_lods == bld->coord_type.length); + assert(bld->num_mips == bld->coord_type.length); *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type, coord_type, size, 0); if (dims >= 2) { - *out_width 
= lp_build_pack_aos_scalars(bld->gallivm, size_type, - coord_type, size, 1); + *out_height = lp_build_pack_aos_scalars(bld->gallivm, size_type, + coord_type, size, 1); if (dims == 3) { - *out_width = lp_build_pack_aos_scalars(bld->gallivm, size_type, + *out_depth = lp_build_pack_aos_scalars(bld->gallivm, size_type, coord_type, size, 2); } } @@ -1111,8 +1457,8 @@ lp_build_unnormalized_coords(struct lp_build_sample_context *bld, { const unsigned dims = bld->dims; LLVMValueRef width; - LLVMValueRef height; - LLVMValueRef depth; + LLVMValueRef height = NULL; + LLVMValueRef depth = NULL; lp_build_extract_image_sizes(bld, &bld->float_size_bld, @@ -1132,6 +1478,144 @@ lp_build_unnormalized_coords(struct lp_build_sample_context *bld, } } +/** + * Generate new coords and faces for cubemap texels falling off the face. + * + * @param face face (center) of the pixel + * @param x0 lower x coord + * @param x1 higher x coord (must be x0 + 1) + * @param y0 lower y coord + * @param y1 higher y coord (must be x0 + 1) + * @param max_coord texture cube (level) size - 1 + * @param next_faces new face values when falling off + * @param next_xcoords new x coord values when falling off + * @param next_ycoords new y coord values when falling off + * + * The arrays hold the new values when under/overflow of + * lower x, higher x, lower y, higher y coord would occur (in this order). + * next_xcoords/next_ycoords have two entries each (for both new lower and + * higher coord). + */ +void +lp_build_cube_new_coords(struct lp_build_context *ivec_bld, + LLVMValueRef face, + LLVMValueRef x0, + LLVMValueRef x1, + LLVMValueRef y0, + LLVMValueRef y1, + LLVMValueRef max_coord, + LLVMValueRef next_faces[4], + LLVMValueRef next_xcoords[4][2], + LLVMValueRef next_ycoords[4][2]) +{ + /* + * Lookup tables aren't nice for simd code hence try some logic here. + * (Note that while it would not be necessary to do per-sample (4) lookups + * when using a LUT as it's impossible that texels fall off of positive + * and negative edges simultaneously, it would however be necessary to + * do 2 lookups for corner handling as in this case texels both fall off + * of x and y axes.) + */ + /* + * Next faces (for face 012345): + * x < 0.0 : 451110 + * x >= 1.0 : 540001 + * y < 0.0 : 225422 + * y >= 1.0 : 334533 + * Hence nfx+ (and nfy+) == nfx- (nfy-) xor 1 + * nfx-: face > 1 ? (face == 5 ? 0 : 1) : (4 + face & 1) + * nfy+: face & ~4 > 1 ? face + 2 : 3; + * This could also use pshufb instead, but would need (manually coded) + * ssse3 intrinsic (llvm won't do non-constant shuffles). 
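+    * (Sanity check against the table: face 0 (+x) falling off at
+    * x < 0.0 gives nfx- = 4 + (0 & 1) = 4, i.e. face +z, matching the
+    * first entry of the "x < 0.0 : 451110" row above.)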
+ */ + struct gallivm_state *gallivm = ivec_bld->gallivm; + LLVMValueRef sel, sel_f2345, sel_f23, sel_f2, tmpsel, tmp; + LLVMValueRef faceand1, sel_fand1, maxmx0, maxmx1, maxmy0, maxmy1; + LLVMValueRef c2 = lp_build_const_int_vec(gallivm, ivec_bld->type, 2); + LLVMValueRef c3 = lp_build_const_int_vec(gallivm, ivec_bld->type, 3); + LLVMValueRef c4 = lp_build_const_int_vec(gallivm, ivec_bld->type, 4); + LLVMValueRef c5 = lp_build_const_int_vec(gallivm, ivec_bld->type, 5); + + sel = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c5); + tmpsel = lp_build_select(ivec_bld, sel, ivec_bld->zero, ivec_bld->one); + sel_f2345 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, face, ivec_bld->one); + faceand1 = lp_build_and(ivec_bld, face, ivec_bld->one); + tmp = lp_build_add(ivec_bld, faceand1, c4); + next_faces[0] = lp_build_select(ivec_bld, sel_f2345, tmpsel, tmp); + next_faces[1] = lp_build_xor(ivec_bld, next_faces[0], ivec_bld->one); + + tmp = lp_build_andnot(ivec_bld, face, c4); + sel_f23 = lp_build_cmp(ivec_bld, PIPE_FUNC_GREATER, tmp, ivec_bld->one); + tmp = lp_build_add(ivec_bld, face, c2); + next_faces[3] = lp_build_select(ivec_bld, sel_f23, tmp, c3); + next_faces[2] = lp_build_xor(ivec_bld, next_faces[3], ivec_bld->one); + + /* + * new xcoords (for face 012345): + * x < 0.0 : max max t max-t max max + * x >= 1.0 : 0 0 max-t t 0 0 + * y < 0.0 : max 0 max-s s s max-s + * y >= 1.0 : max 0 s max-s s max-s + * + * ncx[1] = face & ~4 > 1 ? (face == 2 ? max-t : t) : 0 + * ncx[0] = max - ncx[1] + * ncx[3] = face > 1 ? (face & 1 ? max-s : s) : (face & 1) ? 0 : max + * ncx[2] = face & ~4 > 1 ? max - ncx[3] : ncx[3] + */ + sel_f2 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, face, c2); + maxmy0 = lp_build_sub(ivec_bld, max_coord, y0); + tmp = lp_build_select(ivec_bld, sel_f2, maxmy0, y0); + next_xcoords[1][0] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero); + next_xcoords[0][0] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][0]); + maxmy1 = lp_build_sub(ivec_bld, max_coord, y1); + tmp = lp_build_select(ivec_bld, sel_f2, maxmy1, y1); + next_xcoords[1][1] = lp_build_select(ivec_bld, sel_f23, tmp, ivec_bld->zero); + next_xcoords[0][1] = lp_build_sub(ivec_bld, max_coord, next_xcoords[1][1]); + + sel_fand1 = lp_build_cmp(ivec_bld, PIPE_FUNC_EQUAL, faceand1, ivec_bld->one); + + tmpsel = lp_build_select(ivec_bld, sel_fand1, ivec_bld->zero, max_coord); + maxmx0 = lp_build_sub(ivec_bld, max_coord, x0); + tmp = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0); + next_xcoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel); + tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][0]); + next_xcoords[2][0] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][0]); + maxmx1 = lp_build_sub(ivec_bld, max_coord, x1); + tmp = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1); + next_xcoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel); + tmp = lp_build_sub(ivec_bld, max_coord, next_xcoords[3][1]); + next_xcoords[2][1] = lp_build_select(ivec_bld, sel_f23, tmp, next_xcoords[3][1]); + + /* + * new ycoords (for face 012345): + * x < 0.0 : t t 0 max t t + * x >= 1.0 : t t 0 max t t + * y < 0.0 : max-s s 0 max max 0 + * y >= 1.0 : s max-s 0 max 0 max + * + * ncy[0] = face & ~4 > 1 ? (face == 2 ? 0 : max) : t + * ncy[1] = ncy[0] + * ncy[3] = face > 1 ? (face & 1 ? max : 0) : (face & 1) ? max-s : max + * ncx[2] = face & ~4 > 1 ? 
max - ncx[3] : ncx[3]
+    */
+   tmp = lp_build_select(ivec_bld, sel_f2, ivec_bld->zero, max_coord);
+   next_ycoords[0][0] = lp_build_select(ivec_bld, sel_f23, tmp, y0);
+   next_ycoords[1][0] = next_ycoords[0][0];
+   next_ycoords[0][1] = lp_build_select(ivec_bld, sel_f23, tmp, y1);
+   next_ycoords[1][1] = next_ycoords[0][1];
+
+   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx0, x0);
+   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
+   next_ycoords[3][0] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
+   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][0]);
+   next_ycoords[2][0] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][0], tmp);
+   tmpsel = lp_build_select(ivec_bld, sel_fand1, maxmx1, x1);
+   tmp = lp_build_select(ivec_bld, sel_fand1, max_coord, ivec_bld->zero);
+   next_ycoords[3][1] = lp_build_select(ivec_bld, sel_f2345, tmp, tmpsel);
+   tmp = lp_build_sub(ivec_bld, max_coord, next_ycoords[3][1]);
+   next_ycoords[2][1] = lp_build_select(ivec_bld, sel_f23, next_ycoords[3][1], tmp);
+}
+
 
 /** Helper used by lp_build_cube_lookup() */
 static LLVMValueRef
@@ -1144,323 +1628,376 @@ lp_build_cube_imapos(struct lp_build_context *coord_bld, LLVMValueRef coord)
    return ima;
 }
 
-/** Helper used by lp_build_cube_lookup() */
-static LLVMValueRef
-lp_build_cube_imaneg(struct lp_build_context *coord_bld, LLVMValueRef coord)
-{
-   /* ima = -0.5 / abs(coord); */
-   LLVMValueRef negHalf = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, -0.5);
-   LLVMValueRef absCoord = lp_build_abs(coord_bld, coord);
-   LLVMValueRef ima = lp_build_div(coord_bld, negHalf, absCoord);
-   return ima;
-}
-
-/**
- * Helper used by lp_build_cube_lookup()
- * FIXME: the sign here can also be 0.
- * Arithmetically this could definitely make a difference. Either
- * fix the comment or use other (simpler) sign function, not sure
- * which one it should be.
- * \param sign  scalar +1 or -1
- * \param coord  float vector
- * \param ima  float vector
+/** Helper for doing 3-wise selection.
+ * Returns sel1 ? val2 : (sel0 ? val0 : val1).
  */
 static LLVMValueRef
-lp_build_cube_coord(struct lp_build_context *coord_bld,
-                    LLVMValueRef sign, int negate_coord,
-                    LLVMValueRef coord, LLVMValueRef ima)
+lp_build_select3(struct lp_build_context *sel_bld,
+                 LLVMValueRef sel0,
+                 LLVMValueRef sel1,
+                 LLVMValueRef val0,
+                 LLVMValueRef val1,
+                 LLVMValueRef val2)
 {
-   /* return negate(coord) * ima * sign + 0.5; */
-   LLVMValueRef half = lp_build_const_vec(coord_bld->gallivm, coord_bld->type, 0.5);
-   LLVMValueRef res;
-
-   assert(negate_coord == +1 || negate_coord == -1);
-
-   if (negate_coord == -1) {
-      coord = lp_build_negate(coord_bld, coord);
-   }
-
-   res = lp_build_mul(coord_bld, coord, ima);
-   if (sign) {
-      sign = lp_build_broadcast_scalar(coord_bld, sign);
-      res = lp_build_mul(coord_bld, res, sign);
-   }
-   res = lp_build_add(coord_bld, res, half);
-
-   return res;
+   LLVMValueRef tmp;
+   tmp = lp_build_select(sel_bld, sel0, val0, val1);
+   return lp_build_select(sel_bld, sel1, val2, tmp);
 }
 
-/** Helper used by lp_build_cube_lookup()
- * Return (major_coord >= 0) ? pos_face : neg_face;
- */
-static LLVMValueRef
-lp_build_cube_face(struct lp_build_sample_context *bld,
-                   LLVMValueRef major_coord,
-                   unsigned pos_face, unsigned neg_face)
-{
-   struct gallivm_state *gallivm = bld->gallivm;
-   LLVMBuilderRef builder = gallivm->builder;
-   LLVMValueRef cmp = LLVMBuildFCmp(builder, LLVMRealUGE,
-                                    major_coord,
-                                    bld->float_bld.zero, "");
-   LLVMValueRef pos = lp_build_const_int32(gallivm, pos_face);
-   LLVMValueRef neg = lp_build_const_int32(gallivm, neg_face);
-   LLVMValueRef res = LLVMBuildSelect(builder, cmp, pos, neg, "");
-   return res;
-}
-
-
 
 /**
  * Generate code to do cube face selection and compute per-face texcoords.
  */
 void
 lp_build_cube_lookup(struct lp_build_sample_context *bld,
-                     LLVMValueRef s,
-                     LLVMValueRef t,
-                     LLVMValueRef r,
-                     LLVMValueRef *face,
-                     LLVMValueRef *face_s,
-                     LLVMValueRef *face_t)
+                     LLVMValueRef *coords,
+                     const struct lp_derivatives *derivs_in, /* optional */
+                     LLVMValueRef *rho,
+                     struct lp_derivatives *derivs_out, /* optional */
+                     boolean need_derivs)
 {
    struct lp_build_context *coord_bld = &bld->coord_bld;
    LLVMBuilderRef builder = bld->gallivm->builder;
    struct gallivm_state *gallivm = bld->gallivm;
-   LLVMValueRef rx, ry, rz;
-   LLVMValueRef tmp[4], rxyz, arxyz;
+   LLVMValueRef si, ti, ri;
+
+   /*
+    * Do per-pixel face selection. We cannot, however, simply calculate
+    * the derivs afterwards (as we used to do) - that is quite bogus for
+    * explicit derivs anyway, and the values would be "random" when not
+    * all pixels lie on the same face. So what we do here is calculate
+    * the derivatives after scaling the coords by the absolute value of
+    * the inverse major axis, and essentially do the rho calculation
+    * steps as if it were a 3d texture. This is perfect if all pixels
+    * hit the same face, but not so great at edges: the max error should
+    * be sqrt(2) with no_rho_approx, or 2 otherwise (we essentially
+    * measure the 3d distance between 2 points on the cube instead of
+    * measuring up/down the edge). Still, this is probably a win over
+    * just selecting the same face for all pixels - and that alternative
+    * wouldn't work for explicit derivatives at all.
+    */
+   struct lp_build_context *cint_bld = &bld->int_coord_bld;
+   struct lp_type intctype = cint_bld->type;
+   LLVMTypeRef coord_vec_type = coord_bld->vec_type;
+   LLVMTypeRef cint_vec_type = cint_bld->vec_type;
+   LLVMValueRef as, at, ar, face, face_s, face_t;
+   LLVMValueRef as_ge_at, maxasat, ar_ge_as_at;
+   LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
+   LLVMValueRef tnegi, rnegi;
+   LLVMValueRef ma, mai, signma, signmabit, imahalfpos;
+   LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
+   LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
+                                                  1LL << (intctype.width - 1));
+   LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
+                                                   intctype.width - 1);
+   LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
+   LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
+   LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
+   LLVMValueRef s = coords[0];
+   LLVMValueRef t = coords[1];
+   LLVMValueRef r = coords[2];
+
+   assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
+   assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
+   assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
 
    /*
-    * Use the average of the four pixel's texcoords to choose the face.
-    * Slight simplification just calculate the sum, skip scaling.
+    * get absolute value (for x/y/z face selection) and sign bit
+    * (for mirroring minor coords and pos/neg face selection)
+    * of the original coords.
     */
-   tmp[0] = s;
-   tmp[1] = t;
-   tmp[2] = r;
-   rxyz = lp_build_hadd_partial4(&bld->coord_bld, tmp, 3);
-   arxyz = lp_build_abs(&bld->coord_bld, rxyz);
-
-   if (coord_bld->type.length > 4) {
-      struct lp_build_context *cint_bld = &bld->int_coord_bld;
-      struct lp_type intctype = cint_bld->type;
-      LLVMValueRef signrxs, signrys, signrzs, signrxyz, sign;
-      LLVMValueRef arxs, arys, arzs;
-      LLVMValueRef arx_ge_ary, maxarxsarys, arz_ge_arx_ary;
-      LLVMValueRef snewx, tnewx, snewy, tnewy, snewz, tnewz;
-      LLVMValueRef ryneg, rzneg;
-      LLVMValueRef ma, ima;
-      LLVMValueRef posHalf = lp_build_const_vec(gallivm, coord_bld->type, 0.5);
-      LLVMValueRef signmask = lp_build_const_int_vec(gallivm, intctype,
-                                                     1 << (intctype.width - 1));
-      LLVMValueRef signshift = lp_build_const_int_vec(gallivm, intctype,
-                                                      intctype.width -1);
-      LLVMValueRef facex = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_X);
-      LLVMValueRef facey = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Y);
-      LLVMValueRef facez = lp_build_const_int_vec(gallivm, intctype, PIPE_TEX_FACE_POS_Z);
-
-      assert(PIPE_TEX_FACE_NEG_X == PIPE_TEX_FACE_POS_X + 1);
-      assert(PIPE_TEX_FACE_NEG_Y == PIPE_TEX_FACE_POS_Y + 1);
-      assert(PIPE_TEX_FACE_NEG_Z == PIPE_TEX_FACE_POS_Z + 1);
-
-      rx = LLVMBuildBitCast(builder, s, lp_build_vec_type(gallivm, intctype), "");
-      ry = LLVMBuildBitCast(builder, t, lp_build_vec_type(gallivm, intctype), "");
-      rz = LLVMBuildBitCast(builder, r, lp_build_vec_type(gallivm, intctype), "");
-      ryneg = LLVMBuildXor(builder, ry, signmask, "");
-      rzneg = LLVMBuildXor(builder, rz, signmask, "");
-
-      /* the sign bit comes from the averaged vector (per quad),
-       * as does the decision which face to use */
-      signrxyz = LLVMBuildBitCast(builder, rxyz, lp_build_vec_type(gallivm, intctype), "");
-      signrxyz = LLVMBuildAnd(builder, signrxyz, signmask, "");
-
-      arxs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 0, 4);
-      arys = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 1, 4);
-      arzs = lp_build_swizzle_scalar_aos(coord_bld, arxyz, 2, 4);
+   as = lp_build_abs(&bld->coord_bld, s);
+   at = lp_build_abs(&bld->coord_bld, t);
+   ar = lp_build_abs(&bld->coord_bld, r);
+
+   /*
+    * major face determination: select x if x > y else select y,
+    * then select z if z >= max(x,y) else keep the previous result.
+    * If some axes are equal we choose z over y and y over x - the
+    * dx10 spec seems to ask for this while OpenGL doesn't care (if we
+    * didn't care we could save a select or two by using different
+    * compares and doing at_g_as_ar last, since tnewx and tnewz are
+    * the same).
+    */
+   as_ge_at = lp_build_cmp(coord_bld, PIPE_FUNC_GREATER, as, at);
+   maxasat = lp_build_max(coord_bld, as, at);
+   ar_ge_as_at = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, ar, maxasat);
+
+   if (need_derivs && (derivs_in || (bld->no_quad_lod && bld->no_rho_approx))) {
+      /*
+       * XXX: This is really complex.
+       * It is a bit overkill to use this for implicit derivatives as
+       * well - hardly worth the cost in practice - but it seems to be
+       * the only way to get accurate, per-pixel lod values.
+       */
+      LLVMValueRef ima, imahalf, tmp, ddx[3], ddy[3];
+      LLVMValueRef madx, mady, madxdivma, madydivma;
+      LLVMValueRef sdxi, tdxi, rdxi, sdyi, tdyi, rdyi;
+      LLVMValueRef tdxnegi, rdxnegi, tdynegi, rdynegi;
+      LLVMValueRef sdxnewx, sdxnewy, sdxnewz, tdxnewx, tdxnewy, tdxnewz;
+      LLVMValueRef sdynewx, sdynewy, sdynewz, tdynewx, tdynewy, tdynewz;
+      LLVMValueRef face_sdx, face_tdx, face_sdy, face_tdy;
 
       /*
-       * select x if x >= y else select y
-       * select previous result if y >= max(x,y) else select z
+       * s = 1/2 * ( sc / ma + 1)
+       * t = 1/2 * ( tc / ma + 1)
+       *
+       * s' = 1/2 * (sc' * ma - sc * ma') / ma^2
+       * t' = 1/2 * (tc' * ma - tc * ma') / ma^2
+       *
+       * dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma
+       * dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma
+       * dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma
+       * dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma
        */
-      arx_ge_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, arxs, arys);
-      maxarxsarys = lp_build_max(coord_bld, arxs, arys);
-      arz_ge_arx_ary = lp_build_cmp(coord_bld, PIPE_FUNC_GEQUAL, maxarxsarys, arzs);
+
+      /* select ma, calculate ima */
+      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
+      mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
+      signmabit = LLVMBuildAnd(builder, mai, signmask, "");
+      ima = lp_build_div(coord_bld, coord_bld->one, ma);
+      imahalf = lp_build_mul(coord_bld, posHalf, ima);
+      imahalfpos = lp_build_abs(coord_bld, imahalf);
+
+      if (!derivs_in) {
+         ddx[0] = lp_build_ddx(coord_bld, s);
+         ddx[1] = lp_build_ddx(coord_bld, t);
+         ddx[2] = lp_build_ddx(coord_bld, r);
+         ddy[0] = lp_build_ddy(coord_bld, s);
+         ddy[1] = lp_build_ddy(coord_bld, t);
+         ddy[2] = lp_build_ddy(coord_bld, r);
+      }
+      else {
+         ddx[0] = derivs_in->ddx[0];
+         ddx[1] = derivs_in->ddx[1];
+         ddx[2] = derivs_in->ddx[2];
+         ddy[0] = derivs_in->ddy[0];
+         ddy[1] = derivs_in->ddy[1];
+         ddy[2] = derivs_in->ddy[2];
+      }
+
+      /* select major derivatives */
+      madx = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddx[0], ddx[1], ddx[2]);
+      mady = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, ddy[0], ddy[1], ddy[2]);
+
+      si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
+      ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
+      ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
+
+      sdxi = LLVMBuildBitCast(builder, ddx[0], cint_vec_type, "");
+      tdxi = LLVMBuildBitCast(builder, ddx[1], cint_vec_type, "");
+      rdxi = LLVMBuildBitCast(builder, ddx[2], cint_vec_type, "");
+
+      sdyi = LLVMBuildBitCast(builder, ddy[0], cint_vec_type, "");
+      tdyi = LLVMBuildBitCast(builder, ddy[1], cint_vec_type, "");
+      rdyi = LLVMBuildBitCast(builder, ddy[2], cint_vec_type, "");
 
       /*
-       * compute all possible new s/t coords
-       * snewx = signrx * -rz;
-       * tnewx = -ry;
-       * snewy = rx;
-       * tnewy = signry * rz;
-       * snewz = signrz * rx;
-       * tnewz = -ry;
+       * compute all possible new s/t coords, which does the mirroring,
+       * and do the same for the derivs' minor axes:
+       * snewx = signma * -r;
+       * tnewx = -t;
+       * snewy = s;
+       * tnewy = signma * r;
+       * snewz = signma * s;
+       * tnewz = -t;
        */
-      signrxs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 0, 4);
-      snewx = LLVMBuildXor(builder, signrxs, rzneg, "");
-      tnewx = ryneg;
-
-      signrys = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 1, 4);
-      snewy = rx;
-      tnewy = LLVMBuildXor(builder, signrys, rz, "");
-
-      signrzs = lp_build_swizzle_scalar_aos(cint_bld, signrxyz, 2, 4);
-      snewz = LLVMBuildXor(builder, signrzs, rx, "");
-      tnewz = ryneg;
-
-      /* XXX on x86 unclear if we should cast the values back to float
-       * or not - on some cpus (nehalem) pblendvb has twice the throughput
-       * of blendvps though on others there just might be domain
-       * transition penalties when using it (this depends on what llvm
-       * will chose for the bit ops above so there appears no "right way",
-       * but given the boatload of selects let's just use the int type).
-       *
-       * Unfortunately we also need the sign bit of the summed coords.
+      tnegi = LLVMBuildXor(builder, ti, signmask, "");
+      rnegi = LLVMBuildXor(builder, ri, signmask, "");
+      tdxnegi = LLVMBuildXor(builder, tdxi, signmask, "");
+      rdxnegi = LLVMBuildXor(builder, rdxi, signmask, "");
+      tdynegi = LLVMBuildXor(builder, tdyi, signmask, "");
+      rdynegi = LLVMBuildXor(builder, rdyi, signmask, "");
+
+      snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
+      tnewx = tnegi;
+      sdxnewx = LLVMBuildXor(builder, signmabit, rdxnegi, "");
+      tdxnewx = tdxnegi;
+      sdynewx = LLVMBuildXor(builder, signmabit, rdynegi, "");
+      tdynewx = tdynegi;
+
+      snewy = si;
+      tnewy = LLVMBuildXor(builder, signmabit, ri, "");
+      sdxnewy = sdxi;
+      tdxnewy = LLVMBuildXor(builder, signmabit, rdxi, "");
+      sdynewy = sdyi;
+      tdynewy = LLVMBuildXor(builder, signmabit, rdyi, "");
+
+      snewz = LLVMBuildXor(builder, signmabit, si, "");
+      tnewz = tnegi;
+      sdxnewz = LLVMBuildXor(builder, signmabit, sdxi, "");
+      tdxnewz = tdxnegi;
+      sdynewz = LLVMBuildXor(builder, signmabit, sdyi, "");
+      tdynewz = tdynegi;
+
+      /* select the mirrored values */
+      face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
+      face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
+      face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
+      face_sdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdxnewx, sdxnewy, sdxnewz);
+      face_tdx = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdxnewx, tdxnewy, tdxnewz);
+      face_sdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, sdynewx, sdynewy, sdynewz);
+      face_tdy = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tdynewx, tdynewy, tdynewz);
+
+      face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
+      face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
+      face_sdx = LLVMBuildBitCast(builder, face_sdx, coord_vec_type, "");
+      face_tdx = LLVMBuildBitCast(builder, face_tdx, coord_vec_type, "");
+      face_sdy = LLVMBuildBitCast(builder, face_sdy, coord_vec_type, "");
+      face_tdy = LLVMBuildBitCast(builder, face_tdy, coord_vec_type, "");
+
+      /* deriv math, dx.s = 0.5 * (dx.sc - sc * dx.ma / ma) / ma */
+      madxdivma = lp_build_mul(coord_bld, madx, ima);
+      tmp = lp_build_mul(coord_bld, madxdivma, face_s);
+      tmp = lp_build_sub(coord_bld, face_sdx, tmp);
+      derivs_out->ddx[0] = lp_build_mul(coord_bld, tmp, imahalf);
+
+      /* dx.t = 0.5 * (dx.tc - tc * dx.ma / ma) / ma */
+      tmp = lp_build_mul(coord_bld, madxdivma, face_t);
+      tmp = lp_build_sub(coord_bld, face_tdx, tmp);
+      derivs_out->ddx[1] = lp_build_mul(coord_bld, tmp, imahalf);
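       /*
        * A scalar sketch of the quotient rule applied here and for the
        * dy terms below (illustrative only; the helper is hypothetical):
        *
        *    // ima = 1/ma, imahalf = 0.5/ma
        *    float coord_deriv(float d_sc, float sc, float d_ma, float ima)
        *    {
        *       // d(0.5 * (sc/ma + 1)) = 0.5 * (d_sc - sc * d_ma/ma) / ma
        *       return 0.5f * (d_sc - sc * d_ma * ima) * ima;
        *    }
        *
        * madxdivma (= dx.ma / ma) is computed once and shared between the
        * s and t channels; the trailing mul by imahalf supplies the
        * 0.5 / ma factor.
        */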
+
+      /* dy.s = 0.5 * (dy.sc - sc * dy.ma / ma) / ma */
+      madydivma = lp_build_mul(coord_bld, mady, ima);
+      tmp = lp_build_mul(coord_bld, madydivma, face_s);
+      tmp = lp_build_sub(coord_bld, face_sdy, tmp);
+      derivs_out->ddy[0] = lp_build_mul(coord_bld, tmp, imahalf);
+
+      /* dy.t = 0.5 * (dy.tc - tc * dy.ma / ma) / ma */
+      tmp = lp_build_mul(coord_bld, madydivma, face_t);
+      tmp = lp_build_sub(coord_bld, face_tdy, tmp);
+      derivs_out->ddy[1] = lp_build_mul(coord_bld, tmp, imahalf);
+
+      signma = LLVMBuildLShr(builder, mai, signshift, "");
+      coords[2] = LLVMBuildOr(builder, face, signma, "face");
+
+      /* project coords */
+      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
+      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
+
+      coords[0] = lp_build_add(coord_bld, face_s, posHalf);
+      coords[1] = lp_build_add(coord_bld, face_t, posHalf);
+
+      return;
+   }
+
+   else if (need_derivs) {
+      LLVMValueRef ddx_ddy[2], tmp[3], rho_vec;
+      static const unsigned char swizzle0[] = { /* no-op swizzle */
+         0, LP_BLD_SWIZZLE_DONTCARE,
+         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
+      };
+      static const unsigned char swizzle1[] = {
+         1, LP_BLD_SWIZZLE_DONTCARE,
+         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
+      };
+      static const unsigned char swizzle01[] = { /* no-op swizzle */
+         0, 1,
+         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
+      };
+      static const unsigned char swizzle23[] = {
+         2, 3,
+         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
+      };
+      static const unsigned char swizzle02[] = {
+         0, 2,
+         LP_BLD_SWIZZLE_DONTCARE, LP_BLD_SWIZZLE_DONTCARE
+      };
+
+      /*
+       * scale the s/t/r coords pre-select/mirror so we can calculate
+       * "reasonable" derivs.
        */
-      *face_s = lp_build_select(cint_bld, arx_ge_ary, snewx, snewy);
-      *face_t = lp_build_select(cint_bld, arx_ge_ary, tnewx, tnewy);
-      ma = lp_build_select(coord_bld, arx_ge_ary, s, t);
-      *face = lp_build_select(cint_bld, arx_ge_ary, facex, facey);
-      sign = lp_build_select(cint_bld, arx_ge_ary, signrxs, signrys);
-
-      *face_s = lp_build_select(cint_bld, arz_ge_arx_ary, *face_s, snewz);
-      *face_t = lp_build_select(cint_bld, arz_ge_arx_ary, *face_t, tnewz);
-      ma = lp_build_select(coord_bld, arz_ge_arx_ary, ma, r);
-      *face = lp_build_select(cint_bld, arz_ge_arx_ary, *face, facez);
-      sign = lp_build_select(cint_bld, arz_ge_arx_ary, sign, signrzs);
-
-      *face_s = LLVMBuildBitCast(builder, *face_s,
-                                 lp_build_vec_type(gallivm, coord_bld->type), "");
-      *face_t = LLVMBuildBitCast(builder, *face_t,
-                                 lp_build_vec_type(gallivm, coord_bld->type), "");
-
-      /* add +1 for neg face */
-      /* XXX with AVX probably want to use another select here -
-       * as long as we ensure vblendvps gets used we can actually
-       * skip the comparison and just use sign as a "mask" directly.
+      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
+      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
+      s = lp_build_mul(coord_bld, s, imahalfpos);
+      t = lp_build_mul(coord_bld, t, imahalfpos);
+      r = lp_build_mul(coord_bld, r, imahalfpos);
+
+      /*
+       * This isn't quite the same as the "ordinary" (3d deriv) path since we
+       * know the texture is square, which simplifies things (we can entirely
+       * omit the size mul, which normally happens very early, and do it at
+       * the very end instead).
+       * Also, always do the calculations as GALLIVM_DEBUG_NO_RHO_APPROX
+       * would, since the error can otherwise get quite big at edges.
+       * (With no_rho_approx the max error is sqrt(2) at edges, the same as
+       * it is without no_rho_approx for 2d textures; otherwise it would be
+       * a factor of 2.)
        */
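       /*
        * In scalar terms - assuming the packed ddx/ddy helpers below yield
        * the x and y differences of each (pre-scaled) coord per quad, in
        * the lanes the swizzles pick out - the code following this comment
        * computes:
        *
        *    rho_x = dsdx*dsdx + dtdx*dtdx + drdx*drdx;
        *    rho_y = dsdy*dsdy + dtdy*dtdy + drdy*drdy;
        *    rho   = max(rho_x, rho_y);
        *
        * i.e. the squared gradient lengths; the square root (and the mul
        * by the cube side length) is deferred to the lod computation.
        */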
-      sign = LLVMBuildLShr(builder, sign, signshift, "");
-      *face = LLVMBuildOr(builder, *face, sign, "face");
+      ddx_ddy[0] = lp_build_packed_ddx_ddy_twocoord(coord_bld, s, t);
+      ddx_ddy[1] = lp_build_packed_ddx_ddy_onecoord(coord_bld, r);
+
+      ddx_ddy[0] = lp_build_mul(coord_bld, ddx_ddy[0], ddx_ddy[0]);
+      ddx_ddy[1] = lp_build_mul(coord_bld, ddx_ddy[1], ddx_ddy[1]);
+
+      tmp[0] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle01);
+      tmp[1] = lp_build_swizzle_aos(coord_bld, ddx_ddy[0], swizzle23);
+      tmp[2] = lp_build_swizzle_aos(coord_bld, ddx_ddy[1], swizzle02);
 
-      ima = lp_build_cube_imapos(coord_bld, ma);
+      rho_vec = lp_build_add(coord_bld, tmp[0], tmp[1]);
+      rho_vec = lp_build_add(coord_bld, rho_vec, tmp[2]);
 
-      *face_s = lp_build_mul(coord_bld, *face_s, ima);
-      *face_s = lp_build_add(coord_bld, *face_s, posHalf);
-      *face_t = lp_build_mul(coord_bld, *face_t, ima);
-      *face_t = lp_build_add(coord_bld, *face_t, posHalf);
+      tmp[0] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle0);
+      tmp[1] = lp_build_swizzle_aos(coord_bld, rho_vec, swizzle1);
+      *rho = lp_build_max(coord_bld, tmp[0], tmp[1]);
    }
-   else {
-      struct lp_build_if_state if_ctx;
-      LLVMValueRef face_s_var;
-      LLVMValueRef face_t_var;
-      LLVMValueRef face_var;
-      LLVMValueRef arx_ge_ary_arz, ary_ge_arx_arz;
-      LLVMValueRef shuffles[4];
-      LLVMValueRef arxy_ge_aryx, arxy_ge_arzz, arxy_ge_arxy_arzz;
-      LLVMValueRef arxyxy, aryxzz, arxyxy_ge_aryxzz;
-      struct lp_build_context *float_bld = &bld->float_bld;
-
-      assert(bld->coord_bld.type.length == 4);
-
-      shuffles[0] = lp_build_const_int32(gallivm, 0);
-      shuffles[1] = lp_build_const_int32(gallivm, 1);
-      shuffles[2] = lp_build_const_int32(gallivm, 0);
-      shuffles[3] = lp_build_const_int32(gallivm, 1);
-      arxyxy = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
-      shuffles[0] = lp_build_const_int32(gallivm, 1);
-      shuffles[1] = lp_build_const_int32(gallivm, 0);
-      shuffles[2] = lp_build_const_int32(gallivm, 2);
-      shuffles[3] = lp_build_const_int32(gallivm, 2);
-      aryxzz = LLVMBuildShuffleVector(builder, arxyz, arxyz, LLVMConstVector(shuffles, 4), "");
-      arxyxy_ge_aryxzz = lp_build_cmp(&bld->coord_bld, PIPE_FUNC_GEQUAL, arxyxy, aryxzz);
-
-      shuffles[0] = lp_build_const_int32(gallivm, 0);
-      shuffles[1] = lp_build_const_int32(gallivm, 1);
-      arxy_ge_aryx = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
-                                            LLVMConstVector(shuffles, 2), "");
-      shuffles[0] = lp_build_const_int32(gallivm, 2);
-      shuffles[1] = lp_build_const_int32(gallivm, 3);
-      arxy_ge_arzz = LLVMBuildShuffleVector(builder, arxyxy_ge_aryxzz, arxyxy_ge_aryxzz,
-                                            LLVMConstVector(shuffles, 2), "");
-      arxy_ge_arxy_arzz = LLVMBuildAnd(builder, arxy_ge_aryx, arxy_ge_arzz, "");
-
-      arx_ge_ary_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
-                                               lp_build_const_int32(gallivm, 0), "");
-      arx_ge_ary_arz = LLVMBuildICmp(builder, LLVMIntNE, arx_ge_ary_arz,
-                                     lp_build_const_int32(gallivm, 0), "");
-      ary_ge_arx_arz = LLVMBuildExtractElement(builder, arxy_ge_arxy_arzz,
-                                               lp_build_const_int32(gallivm, 1), "");
-      ary_ge_arx_arz = LLVMBuildICmp(builder, LLVMIntNE, ary_ge_arx_arz,
-                                     lp_build_const_int32(gallivm, 0), "");
-      face_s_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_s_var");
-      face_t_var = lp_build_alloca(gallivm, bld->coord_bld.vec_type, "face_t_var");
-      face_var = lp_build_alloca(gallivm, bld->int_bld.vec_type, "face_var");
-
-      lp_build_if(&if_ctx, gallivm, arx_ge_ary_arz);
-      {
-         /* +/- X face */
-         LLVMValueRef sign, ima;
-         rx = LLVMBuildExtractElement(builder, rxyz,
-                                      lp_build_const_int32(gallivm, 0), "");
-         /* +/- X face */
-         sign = lp_build_sgn(float_bld, rx);
-         ima = lp_build_cube_imaneg(coord_bld, s);
-         *face_s = lp_build_cube_coord(coord_bld, sign, +1, r, ima);
-         *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
-         *face = lp_build_cube_face(bld, rx,
-                                    PIPE_TEX_FACE_POS_X,
-                                    PIPE_TEX_FACE_NEG_X);
-         LLVMBuildStore(builder, *face_s, face_s_var);
-         LLVMBuildStore(builder, *face_t, face_t_var);
-         LLVMBuildStore(builder, *face, face_var);
-      }
-      lp_build_else(&if_ctx);
-      {
-         struct lp_build_if_state if_ctx2;
-
-         lp_build_if(&if_ctx2, gallivm, ary_ge_arx_arz);
-         {
-            LLVMValueRef sign, ima;
-            /* +/- Y face */
-            ry = LLVMBuildExtractElement(builder, rxyz,
-                                         lp_build_const_int32(gallivm, 1), "");
-            sign = lp_build_sgn(float_bld, ry);
-            ima = lp_build_cube_imaneg(coord_bld, t);
-            *face_s = lp_build_cube_coord(coord_bld, NULL, -1, s, ima);
-            *face_t = lp_build_cube_coord(coord_bld, sign, -1, r, ima);
-            *face = lp_build_cube_face(bld, ry,
-                                       PIPE_TEX_FACE_POS_Y,
-                                       PIPE_TEX_FACE_NEG_Y);
-            LLVMBuildStore(builder, *face_s, face_s_var);
-            LLVMBuildStore(builder, *face_t, face_t_var);
-            LLVMBuildStore(builder, *face, face_var);
-         }
-         lp_build_else(&if_ctx2);
-         {
-            /* +/- Z face */
-            LLVMValueRef sign, ima;
-            rz = LLVMBuildExtractElement(builder, rxyz,
-                                         lp_build_const_int32(gallivm, 2), "");
-            sign = lp_build_sgn(float_bld, rz);
-            ima = lp_build_cube_imaneg(coord_bld, r);
-            *face_s = lp_build_cube_coord(coord_bld, sign, -1, s, ima);
-            *face_t = lp_build_cube_coord(coord_bld, NULL, +1, t, ima);
-            *face = lp_build_cube_face(bld, rz,
-                                       PIPE_TEX_FACE_POS_Z,
-                                       PIPE_TEX_FACE_NEG_Z);
-            LLVMBuildStore(builder, *face_s, face_s_var);
-            LLVMBuildStore(builder, *face_t, face_t_var);
-            LLVMBuildStore(builder, *face, face_var);
-         }
-         lp_build_endif(&if_ctx2);
-      }
+   if (!need_derivs) {
+      ma = lp_build_select3(coord_bld, as_ge_at, ar_ge_as_at, s, t, r);
+   }
+   mai = LLVMBuildBitCast(builder, ma, cint_vec_type, "");
+   signmabit = LLVMBuildAnd(builder, mai, signmask, "");
+
+   si = LLVMBuildBitCast(builder, s, cint_vec_type, "");
+   ti = LLVMBuildBitCast(builder, t, cint_vec_type, "");
+   ri = LLVMBuildBitCast(builder, r, cint_vec_type, "");
+
+   /*
+    * compute all possible new s/t coords, which does the mirroring:
+    * snewx = signma * -r;
+    * tnewx = -t;
+    * snewy = s;
+    * tnewy = signma * r;
+    * snewz = signma * s;
+    * tnewz = -t;
+    */
+   tnegi = LLVMBuildXor(builder, ti, signmask, "");
+   rnegi = LLVMBuildXor(builder, ri, signmask, "");
+
+   snewx = LLVMBuildXor(builder, signmabit, rnegi, "");
+   tnewx = tnegi;
+
+   snewy = si;
+   tnewy = LLVMBuildXor(builder, signmabit, ri, "");
 
-      lp_build_endif(&if_ctx);
+   snewz = LLVMBuildXor(builder, signmabit, si, "");
+   tnewz = tnegi;
 
-      *face_s = LLVMBuildLoad(builder, face_s_var, "face_s");
-      *face_t = LLVMBuildLoad(builder, face_t_var, "face_t");
-      *face = LLVMBuildLoad(builder, face_var, "face");
-      *face = lp_build_broadcast_scalar(&bld->int_coord_bld, *face);
+   /* select the mirrored values */
+   face_s = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, snewx, snewy, snewz);
+   face_t = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, tnewx, tnewy, tnewz);
+   face = lp_build_select3(cint_bld, as_ge_at, ar_ge_as_at, facex, facey, facez);
+
+   face_s = LLVMBuildBitCast(builder, face_s, coord_vec_type, "");
+   face_t = LLVMBuildBitCast(builder, face_t, coord_vec_type, "");
+
+   /* add +1 for neg face */
+   /* XXX with AVX we'd probably want to use another select here -
+    * as long as we ensure vblendvps gets used we could actually
+    * skip the comparison and just use sign as a "mask" directly.
+    */
+   signma = LLVMBuildLShr(builder, mai, signshift, "");
+   coords[2] = LLVMBuildOr(builder, face, signma, "face");
+
+   /* project coords */
+   if (!need_derivs) {
+      imahalfpos = lp_build_cube_imapos(coord_bld, ma);
+      face_s = lp_build_mul(coord_bld, face_s, imahalfpos);
+      face_t = lp_build_mul(coord_bld, face_t, imahalfpos);
+   }
+
+   coords[0] = lp_build_add(coord_bld, face_s, posHalf);
+   coords[1] = lp_build_add(coord_bld, face_t, posHalf);
 }
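
For reference, a scalar model of the fast path above - illustrative only (the
function below is hypothetical and not part of gallivm), but it follows the
same face priority, mirroring and projection rules as lp_build_cube_lookup()
and lp_build_select3():

   #include <math.h>
   #include "pipe/p_defines.h"

   struct cube_st {
      int face;    /* PIPE_TEX_FACE_POS_X + 1 == PIPE_TEX_FACE_NEG_X, etc. */
      float s, t;  /* projected to [0,1] on the selected face */
   };

   static struct cube_st
   cube_lookup_scalar(float s, float t, float r)
   {
      struct cube_st res;
      const float as = fabsf(s), at = fabsf(t), ar = fabsf(r);
      float ma, new_s, new_t;
      int pos_face;

      /*
       * Same priority as lp_build_select3(as_ge_at, ar_ge_as_at, x, y, z):
       * z wins ties against x and y, y wins ties against x.
       */
      if (ar >= as && ar >= at) {
         ma = r;                          /* major axis z */
         pos_face = PIPE_TEX_FACE_POS_Z;
         new_s = (ma >= 0.0f) ? s : -s;   /* snewz = signma * s */
         new_t = -t;                      /* tnewz = -t */
      }
      else if (as > at) {
         ma = s;                          /* major axis x */
         pos_face = PIPE_TEX_FACE_POS_X;
         new_s = (ma >= 0.0f) ? -r : r;   /* snewx = signma * -r */
         new_t = -t;                      /* tnewx = -t */
      }
      else {
         ma = t;                          /* major axis y */
         pos_face = PIPE_TEX_FACE_POS_Y;
         new_s = s;                       /* snewy = s */
         new_t = (ma >= 0.0f) ? r : -r;   /* tnewy = signma * r */
      }

      /*
       * "add +1 for neg face" - the vector code ORs the major axis'
       * sign bit into the face index rather than comparing with zero.
       */
      res.face = pos_face + (ma < 0.0f ? 1 : 0);

      /* project: scale by 0.5/|ma| (lp_build_cube_imapos), bias to [0,1] */
      res.s = new_s * (0.5f / fabsf(ma)) + 0.5f;
      res.t = new_t * (0.5f / fabsf(ma)) + 0.5f;
      return res;
   }

For example, cube_lookup_scalar(1.0f, 0.4f, -0.4f) selects face 0
(PIPE_TEX_FACE_POS_X) and yields (s, t) = (0.7, 0.3).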