bld->texel_type,
data_ptr, offset,
i, j,
+ bld->cache,
texel_out);
/*
break;
case PIPE_TEX_WRAP_MIRROR_REPEAT:
+ if (offset) {
+ offset = lp_build_int_to_float(coord_bld, offset);
+ offset = lp_build_div(coord_bld, offset, length_f);
+ coord = lp_build_add(coord_bld, coord, offset);
+ }
/* compute mirror function */
coord = lp_build_coord_mirror(bld, coord);
/* scale coord to length */
coord = lp_build_mul(coord_bld, coord, length_f);
coord = lp_build_sub(coord_bld, coord, half);
- if (offset) {
- offset = lp_build_int_to_float(coord_bld, offset);
- coord = lp_build_add(coord_bld, coord, offset);
- }
/* convert to int, compute lerp weight */
lp_build_ifloor_fract(coord_bld, coord, &coord0, &weight);
coord = lp_build_mul(coord_bld, coord, length_f);
}
+ if (offset) {
+ offset = lp_build_int_to_float(coord_bld, offset);
+ coord = lp_build_add(coord_bld, coord, offset);
+ }
/* floor */
/* use itrunc instead since we clamp to 0 anyway */
icoord = lp_build_itrunc(coord_bld, coord);
- if (offset) {
- icoord = lp_build_add(int_coord_bld, icoord, offset);
- }
/* clamp to [0, length - 1]. */
icoord = lp_build_clamp(int_coord_bld, icoord, int_coord_bld->zero,
*/
static void
lp_build_sample_image_linear(struct lp_build_sample_context *bld,
+ boolean is_gather,
LLVMValueRef size,
LLVMValueRef linear_mask,
LLVMValueRef row_stride_vec,
LLVMBuilderRef builder = bld->gallivm->builder;
struct lp_build_context *ivec_bld = &bld->int_coord_bld;
struct lp_build_context *coord_bld = &bld->coord_bld;
+ struct lp_build_context *texel_bld = &bld->texel_bld;
const unsigned dims = bld->dims;
LLVMValueRef width_vec;
LLVMValueRef height_vec;
seamless_cube_filter = (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
bld->static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
bld->static_sampler_state->seamless_cube_map;
- accurate_cube_corners = ACCURATE_CUBE_CORNERS && seamless_cube_filter;
+ /*
+ * XXX I don't know how this is really supposed to work with gather. From GL
+ * spec wording (not gather specific) it sounds like the 4th missing texel
+ * should be an average of the other 3, hence for gather could return this.
+ * This is however NOT how the code here works, which just fixes up the
+ * weights used for filtering instead. And of course for gather there is
+ * no filter to tweak...
+ */
+ accurate_cube_corners = ACCURATE_CUBE_CORNERS && seamless_cube_filter &&
+ !is_gather;
lp_build_extract_image_sizes(bld,
&bld->int_size_bld,
data_ptr, mipoffsets, neighbors[0][1]);
if (dims == 1) {
+ assert(!is_gather);
if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
/* Interpolate two samples from 1D image to produce one color */
for (chan = 0; chan < 4; chan++) {
- colors_out[chan] = lp_build_lerp(&bld->texel_bld, s_fpart,
+ colors_out[chan] = lp_build_lerp(texel_bld, s_fpart,
neighbors[0][0][chan],
neighbors[0][1][chan],
0);
cmpval0 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][0][0]);
cmpval1 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
/* simplified lerp, AND mask with weight and add */
- colors_out[0] = lp_build_masklerp(&bld->texel_bld, s_fpart,
+ colors_out[0] = lp_build_masklerp(texel_bld, s_fpart,
cmpval0, cmpval1);
colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
}
}
if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
- /* Bilinear interpolate the four samples from the 2D image / 3D slice */
- for (chan = 0; chan < 4; chan++) {
- colors0[chan] = lp_build_lerp_2d(&bld->texel_bld,
- s_fpart, t_fpart,
- neighbors[0][0][chan],
- neighbors[0][1][chan],
- neighbors[1][0][chan],
- neighbors[1][1][chan],
- 0);
+ if (is_gather) {
+ /*
+ * Just assign the red channel (no component selection yet).
+ * This is a bit hackish, we usually do the swizzle at the
+ * end of sampling (far fewer values to swizzle), but this
+ * obviously cannot work when using gather.
+ */
+ unsigned chan_swiz = bld->static_texture_state->swizzle_r;
+ colors0[0] = lp_build_swizzle_soa_channel(texel_bld,
+ neighbors[1][0],
+ chan_swiz);
+ colors0[1] = lp_build_swizzle_soa_channel(texel_bld,
+ neighbors[1][1],
+ chan_swiz);
+ colors0[2] = lp_build_swizzle_soa_channel(texel_bld,
+ neighbors[0][1],
+ chan_swiz);
+ colors0[3] = lp_build_swizzle_soa_channel(texel_bld,
+ neighbors[0][0],
+ chan_swiz);
+ }
+ else {
+ /* Bilinear interpolate the four samples from the 2D image / 3D slice */
+ for (chan = 0; chan < 4; chan++) {
+ colors0[chan] = lp_build_lerp_2d(texel_bld,
+ s_fpart, t_fpart,
+ neighbors[0][0][chan],
+ neighbors[0][1][chan],
+ neighbors[1][0][chan],
+ neighbors[1][1][chan],
+ 0);
+ }
}
}
else {
cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
- colors0[0] = lp_build_masklerp2d(&bld->texel_bld, s_fpart, t_fpart,
- cmpval00, cmpval01, cmpval10, cmpval11);
- colors0[1] = colors0[2] = colors0[3] = colors0[0];
+
+ if (is_gather) {
+ /* more hacks for swizzling, should be X, ONE or ZERO... */
+ unsigned chan_swiz = bld->static_texture_state->swizzle_r;
+ if (chan_swiz <= PIPE_SWIZZLE_ALPHA) {
+ colors0[0] = lp_build_select(texel_bld, cmpval10,
+ texel_bld->one, texel_bld->zero);
+ colors0[1] = lp_build_select(texel_bld, cmpval11,
+ texel_bld->one, texel_bld->zero);
+ colors0[2] = lp_build_select(texel_bld, cmpval01,
+ texel_bld->one, texel_bld->zero);
+ colors0[3] = lp_build_select(texel_bld, cmpval00,
+ texel_bld->one, texel_bld->zero);
+ }
+ else if (chan_swiz == PIPE_SWIZZLE_ZERO) {
+ colors0[0] = colors0[1] = colors0[2] = colors0[3] =
+ texel_bld->zero;
+ }
+ else {
+ colors0[0] = colors0[1] = colors0[2] = colors0[3] =
+ texel_bld->one;
+ }
+ }
+ else {
+ colors0[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
+ cmpval00, cmpval01, cmpval10, cmpval11);
+ colors0[1] = colors0[2] = colors0[3] = colors0[0];
+ }
}
if (accurate_cube_corners) {
LLVMValueRef neighbors1[2][2][4];
LLVMValueRef colors1[4];
+ assert(!is_gather);
+
/* get x0/x1/y0/y1 texels at z1 */
lp_build_sample_texel_soa(bld,
width_vec, height_vec, depth_vec,
if (bld->static_sampler_state->compare_mode == PIPE_TEX_COMPARE_NONE) {
/* Bilinear interpolate the four samples from the second Z slice */
for (chan = 0; chan < 4; chan++) {
- colors1[chan] = lp_build_lerp_2d(&bld->texel_bld,
+ colors1[chan] = lp_build_lerp_2d(texel_bld,
s_fpart, t_fpart,
neighbors1[0][0][chan],
neighbors1[0][1][chan],
}
/* Linearly interpolate the two samples from the two 3D slices */
for (chan = 0; chan < 4; chan++) {
- colors_out[chan] = lp_build_lerp(&bld->texel_bld,
+ colors_out[chan] = lp_build_lerp(texel_bld,
r_fpart,
colors0[chan], colors1[chan],
0);
cmpval01 = lp_build_sample_comparefunc(bld, coords[4], neighbors[0][1][0]);
cmpval10 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][0][0]);
cmpval11 = lp_build_sample_comparefunc(bld, coords[4], neighbors[1][1][0]);
- colors1[0] = lp_build_masklerp2d(&bld->texel_bld, s_fpart, t_fpart,
+ colors1[0] = lp_build_masklerp2d(texel_bld, s_fpart, t_fpart,
cmpval00, cmpval01, cmpval10, cmpval11);
/* Linearly interpolate the two samples from the two 3D slices */
- colors_out[0] = lp_build_lerp(&bld->texel_bld,
- r_fpart,
- colors0[0], colors1[0],
- 0);
+ colors_out[0] = lp_build_lerp(texel_bld,
+ r_fpart,
+ colors0[0], colors1[0],
+ 0);
colors_out[1] = colors_out[2] = colors_out[3] = colors_out[0];
}
}
lp_build_sample_mipmap(struct lp_build_sample_context *bld,
unsigned img_filter,
unsigned mip_filter,
+ boolean is_gather,
LLVMValueRef *coords,
const LLVMValueRef *offsets,
LLVMValueRef ilevel0,
}
else {
assert(img_filter == PIPE_TEX_FILTER_LINEAR);
- lp_build_sample_image_linear(bld, size0, NULL,
+ lp_build_sample_image_linear(bld, is_gather, size0, NULL,
row_stride0_vec, img_stride0_vec,
data_ptr0, mipoff0, coords, offsets,
colors0);
colors1);
}
else {
- lp_build_sample_image_linear(bld, size1, NULL,
+ lp_build_sample_image_linear(bld, FALSE, size1, NULL,
row_stride1_vec, img_stride1_vec,
data_ptr1, mipoff1, coords, offsets,
colors1);
mipoff0 = lp_build_get_mip_offsets(bld, ilevel0);
}
- lp_build_sample_image_linear(bld, size0, linear_mask,
+ lp_build_sample_image_linear(bld, FALSE, size0, linear_mask,
row_stride0_vec, img_stride0_vec,
data_ptr0, mipoff0, coords, offsets,
colors0);
mipoff1 = lp_build_get_mip_offsets(bld, ilevel1);
}
- lp_build_sample_image_linear(bld, size1, linear_mask,
+ lp_build_sample_image_linear(bld, FALSE, size1, linear_mask,
row_stride1_vec, img_stride1_vec,
data_ptr1, mipoff1, coords, offsets,
colors1);
LLVMPointerType(vec4_bld.vec_type, 0), "");
border_color = LLVMBuildLoad(builder, border_color_ptr, "");
/* we don't have aligned type in the dynamic state unfortunately */
- lp_set_load_alignment(border_color, 4);
+ LLVMSetAlignment(border_color, 4);
/*
* Instead of having some incredibly complex logic which will try to figure out
static void
lp_build_sample_general(struct lp_build_sample_context *bld,
unsigned sampler_unit,
+ boolean is_gather,
LLVMValueRef *coords,
const LLVMValueRef *offsets,
LLVMValueRef lod_positive,
if (min_filter == mag_filter) {
/* no need to distinguish between minification and magnification */
lp_build_sample_mipmap(bld, min_filter, mip_filter,
+ is_gather,
coords, offsets,
ilevel0, ilevel1, lod_fpart,
texels);
lp_build_if(&if_ctx, bld->gallivm, lod_positive);
{
/* Use the minification filter */
- lp_build_sample_mipmap(bld, min_filter, mip_filter,
+ lp_build_sample_mipmap(bld, min_filter, mip_filter, FALSE,
coords, offsets,
ilevel0, ilevel1, lod_fpart,
texels);
{
/* Use the magnification filter */
lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE,
+ FALSE,
coords, offsets,
ilevel0, NULL, NULL,
texels);
* All pixels require just nearest filtering, which is way
* cheaper than linear, hence do a separate path for that.
*/
- lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST,
+ lp_build_sample_mipmap(bld, PIPE_TEX_FILTER_NEAREST, FALSE,
mip_filter_for_nearest,
coords, offsets,
ilevel0, ilevel1, lod_fpart,
bld->texel_type,
bld->base_ptr, offset,
i, j,
+ bld->cache,
colors_out);
if (out_of_bound_ret_zero) {
/**
- * Build texture sampling code.
+ * Build the actual texture sampling code.
* 'texel' will return a vector of four LLVMValueRefs corresponding to
* R, G, B, A.
* \param type vector float type to use for coords, etc.
- * \param is_fetch if this is a texel fetch instruction.
+ * \param sample_key
* \param derivs partial derivatives of (s,t,r,q) with respect to x and y
*/
-void
-lp_build_sample_soa(struct gallivm_state *gallivm,
- const struct lp_static_texture_state *static_texture_state,
- const struct lp_static_sampler_state *static_sampler_state,
- struct lp_sampler_dynamic_state *dynamic_state,
- struct lp_type type,
- boolean is_fetch,
- unsigned texture_index,
- unsigned sampler_index,
- LLVMValueRef context_ptr,
- const LLVMValueRef *coords,
- const LLVMValueRef *offsets,
- const struct lp_derivatives *derivs, /* optional */
- LLVMValueRef lod_bias, /* optional */
- LLVMValueRef explicit_lod, /* optional */
- enum lp_sampler_lod_property lod_property,
- LLVMValueRef texel_out[4])
+static void
+lp_build_sample_soa_code(struct gallivm_state *gallivm,
+ const struct lp_static_texture_state *static_texture_state,
+ const struct lp_static_sampler_state *static_sampler_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ struct lp_type type,
+ unsigned sample_key,
+ unsigned texture_index,
+ unsigned sampler_index,
+ LLVMValueRef context_ptr,
+ LLVMValueRef thread_data_ptr,
+ const LLVMValueRef *coords,
+ const LLVMValueRef *offsets,
+ const struct lp_derivatives *derivs, /* optional */
+ LLVMValueRef lod, /* optional */
+ LLVMValueRef texel_out[4])
{
unsigned target = static_texture_state->target;
unsigned dims = texture_dims(target);
LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef tex_width, newcoords[5];
+ enum lp_sampler_lod_property lod_property;
+ enum lp_sampler_lod_control lod_control;
+ enum lp_sampler_op_type op_type;
+ LLVMValueRef lod_bias = NULL;
+ LLVMValueRef explicit_lod = NULL;
+ boolean op_is_tex;
if (0) {
enum pipe_format fmt = static_texture_state->format;
debug_printf("Sample from %s\n", util_format_name(fmt));
}
+ lod_property = (sample_key & LP_SAMPLER_LOD_PROPERTY_MASK) >>
+ LP_SAMPLER_LOD_PROPERTY_SHIFT;
+ lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
+ LP_SAMPLER_LOD_CONTROL_SHIFT;
+ op_type = (sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
+ LP_SAMPLER_OP_TYPE_SHIFT;
+
+ op_is_tex = op_type == LP_SAMPLER_OP_TEXTURE;
+
+ if (lod_control == LP_SAMPLER_LOD_BIAS) {
+ lod_bias = lod;
+ assert(lod);
+ assert(derivs == NULL);
+ }
+ else if (lod_control == LP_SAMPLER_LOD_EXPLICIT) {
+ explicit_lod = lod;
+ assert(lod);
+ assert(derivs == NULL);
+ }
+ else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
+ assert(derivs);
+ assert(lod == NULL);
+ }
+ else {
+ assert(derivs == NULL);
+ assert(lod == NULL);
+ }
+
if (static_texture_state->format == PIPE_FORMAT_NONE) {
/*
* If there's nothing bound, format is NONE, and we must return
* all zero as mandated by d3d10 in this case.
*/
unsigned chan;
- LLVMValueRef zero = lp_build_const_vec(gallivm, type, 0.0F);
+ LLVMValueRef zero = lp_build_zero(gallivm, type);
for (chan = 0; chan < 4; chan++) {
texel_out[chan] = zero;
}
} else {
derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
}
+ if (op_type == LP_SAMPLER_OP_GATHER) {
+ /*
+ * gather4 is exactly like GL_LINEAR filtering but in the end skipping
+ * the actual filtering. Using mostly the same paths, so cube face
+ * selection, coord wrapping etc. all naturally uses the same code.
+ */
+ derived_sampler_state.min_mip_filter = PIPE_TEX_MIPFILTER_NONE;
+ derived_sampler_state.min_img_filter = PIPE_TEX_FILTER_LINEAR;
+ derived_sampler_state.mag_img_filter = PIPE_TEX_FILTER_LINEAR;
+ }
mip_filter = derived_sampler_state.min_mip_filter;
if (0) {
derived_sampler_state.wrap_s = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
derived_sampler_state.wrap_t = PIPE_TEX_WRAP_CLAMP_TO_EDGE;
}
+ /*
+ * We could force CLAMP to CLAMP_TO_EDGE here if min/mag filter is nearest,
+ * so the AoS path could be used. Not sure it's worth the trouble...
+ */
min_img_filter = derived_sampler_state.min_img_filter;
mag_img_filter = derived_sampler_state.mag_img_filter;
(gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
(static_texture_state->target == PIPE_TEXTURE_CUBE ||
static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
- (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+ (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
/*
* special case for using per-pixel lod even for implicit lod,
* which is generally never required (ok by APIs) except to please
}
else if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT ||
(explicit_lod || lod_bias || derivs)) {
- if ((is_fetch && target != PIPE_BUFFER) ||
- (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+ if ((!op_is_tex && target != PIPE_BUFFER) ||
+ (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
bld.num_mips = type.length;
bld.num_lods = type.length;
}
- else if (!is_fetch && min_img_filter != mag_img_filter) {
+ else if (op_is_tex && min_img_filter != mag_img_filter) {
bld.num_mips = 1;
bld.num_lods = type.length;
}
}
/* TODO: for true scalar_lod should only use 1 lod value */
- else if ((is_fetch && explicit_lod && target != PIPE_BUFFER) ||
- (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+ else if ((!op_is_tex && explicit_lod && target != PIPE_BUFFER) ||
+ (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
bld.num_mips = num_quads;
bld.num_lods = num_quads;
}
- else if (!is_fetch && min_img_filter != mag_img_filter) {
+ else if (op_is_tex && min_img_filter != mag_img_filter) {
bld.num_mips = 1;
bld.num_lods = num_quads;
}
context_ptr, texture_index);
/* Note that mip_offsets is an array[level] of offsets to texture images */
+ if (dynamic_state->cache_ptr && thread_data_ptr) {
+ bld.cache = dynamic_state->cache_ptr(dynamic_state, gallivm,
+ thread_data_ptr, texture_index);
+ }
+
/* width, height, depth as single int vector */
if (dims <= 1) {
bld.int_size = tex_width;
texel_out);
}
- else if (is_fetch) {
+ else if (op_type == LP_SAMPLER_OP_FETCH) {
lp_build_fetch_texel(&bld, texture_index, newcoords,
- explicit_lod, offsets,
+ lod, offsets,
texel_out);
}
else {
LLVMValueRef lod_fpart = NULL, lod_positive = NULL;
LLVMValueRef ilevel0 = NULL, ilevel1 = NULL;
- boolean use_aos = util_format_fits_8unorm(bld.format_desc) &&
- /* not sure this is strictly needed or simply impossible */
- derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
- lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);
+ boolean use_aos;
+
+ if (util_format_is_pure_integer(static_texture_state->format) &&
+ !util_format_has_depth(bld.format_desc) &&
+ (static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_LINEAR ||
+ static_sampler_state->min_img_filter == PIPE_TEX_FILTER_LINEAR ||
+ static_sampler_state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)) {
+ /*
+ * Bail if impossible filtering is specified (the awkward additional
+ * depth check is because it is legal in gallium to have things like S8Z24
+ * here which would say it's pure int despite such formats should sample
+ * the depth component).
+ * In GL such filters make the texture incomplete, this makes it robust
+ * against state trackers which set this up regardless (we'd crash in the
+ * lerp later (except for gather)).
+ * Must do this after fetch_texel code since with GL state tracker we'll
+ * get some junk sampler for buffer textures.
+ */
+ unsigned chan;
+ LLVMValueRef zero = lp_build_zero(gallivm, type);
+ for (chan = 0; chan < 4; chan++) {
+ texel_out[chan] = zero;
+ }
+ return;
+ }
+
+ use_aos = util_format_fits_8unorm(bld.format_desc) &&
+ op_is_tex &&
+ /* not sure this is strictly needed or simply impossible */
+ derived_sampler_state.compare_mode == PIPE_TEX_COMPARE_NONE &&
+ lp_is_simple_wrap_mode(derived_sampler_state.wrap_s);
use_aos &= bld.num_lods <= num_quads ||
derived_sampler_state.min_img_filter ==
else {
lp_build_sample_general(&bld, sampler_index,
+ op_type == LP_SAMPLER_OP_GATHER,
newcoords, offsets,
lod_positive, lod_fpart,
ilevel0, ilevel1,
bld4.base_ptr = bld.base_ptr;
bld4.mip_offsets = bld.mip_offsets;
bld4.int_size = bld.int_size;
+ bld4.cache = bld.cache;
bld4.vector_width = lp_type_width(type4);
(gallivm_debug & GALLIVM_DEBUG_NO_RHO_APPROX) &&
(static_texture_state->target == PIPE_TEXTURE_CUBE ||
static_texture_state->target == PIPE_TEXTURE_CUBE_ARRAY) &&
- (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+ (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
bld4.num_mips = type4.length;
bld4.num_lods = type4.length;
}
if (lod_property == LP_SAMPLER_LOD_PER_ELEMENT &&
(explicit_lod || lod_bias || derivs)) {
- if ((is_fetch && target != PIPE_BUFFER) ||
- (!is_fetch && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
+ if ((!op_is_tex && target != PIPE_BUFFER) ||
+ (op_is_tex && mip_filter != PIPE_TEX_MIPFILTER_NONE)) {
bld4.num_mips = type4.length;
bld4.num_lods = type4.length;
}
- else if (!is_fetch && min_img_filter != mag_img_filter) {
+ else if (op_is_tex && min_img_filter != mag_img_filter) {
bld4.num_mips = 1;
bld4.num_lods = type4.length;
}
newcoords4[4] = lp_build_extract_range(gallivm, newcoords[4], 4*i, 4);
lp_build_sample_general(&bld4, sampler_index,
+ op_type == LP_SAMPLER_OP_GATHER,
newcoords4, offsets4,
lod_positive4, lod_fpart4,
ilevel04, ilevel14,
}
}
- if (target != PIPE_BUFFER) {
+ if (target != PIPE_BUFFER && op_type != LP_SAMPLER_OP_GATHER) {
apply_sampler_swizzle(&bld, texel_out);
}
}
+/* If 1, sampling code may be emitted as per-(texture, sampler, sample_key)
+ * helper functions which get called, instead of always being inlined. */
+#define USE_TEX_FUNC_CALL 1
+
+/* Upper bound on the argument count of a generated texture function
+ * (context ptr, optional thread data ptr, coords, offsets, lod/derivs). */
+#define LP_MAX_TEX_FUNC_ARGS 32
+
+/**
+ * Determine the argument layout for a given texture target:
+ * the number of coordinate, offset and derivative values passed, and the
+ * coord slot holding the layer index (0 if the target has no layer coord).
+ */
+static inline void
+get_target_info(enum pipe_texture_target target,
+ unsigned *num_coords, unsigned *num_derivs,
+ unsigned *num_offsets, unsigned *layer)
+{
+ unsigned dims = texture_dims(target);
+ *num_coords = dims;
+ *num_offsets = dims;
+ /* cube (and cube array) targets take 3 derivative components even
+ * though texture_dims() reports 2 for them */
+ *num_derivs = (target == PIPE_TEXTURE_CUBE ||
+ target == PIPE_TEXTURE_CUBE_ARRAY) ? 3 : dims;
+ *layer = has_layer_coord(target) ? 2: 0;
+ if (target == PIPE_TEXTURE_CUBE_ARRAY) {
+ /*
+ * dims doesn't include r coord for cubes - this is handled
+ * by layer instead, but need to fix up for cube arrays...
+ */
+ *layer = 3;
+ *num_coords = 3;
+ }
+}
+
+
+/**
+ * Generate the function body for a texture sampling function.
+ *
+ * Unpacks the parameters of 'function' (the order must match the argument
+ * list assembled in lp_build_sample_soa_func()), emits the sampling code
+ * via lp_build_sample_soa_code() into a fresh entry block, and returns the
+ * four texel channels as an aggregate return value.
+ */
+static void
+lp_build_sample_gen_func(struct gallivm_state *gallivm,
+ const struct lp_static_texture_state *static_texture_state,
+ const struct lp_static_sampler_state *static_sampler_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ struct lp_type type,
+ unsigned texture_index,
+ unsigned sampler_index,
+ LLVMValueRef function,
+ unsigned num_args,
+ unsigned sample_key)
+{
+ LLVMBuilderRef old_builder;
+ LLVMBasicBlockRef block;
+ LLVMValueRef coords[5];
+ LLVMValueRef offsets[3] = { NULL };
+ LLVMValueRef lod = NULL;
+ LLVMValueRef context_ptr;
+ LLVMValueRef thread_data_ptr = NULL;
+ LLVMValueRef texel_out[4];
+ struct lp_derivatives derivs;
+ struct lp_derivatives *deriv_ptr = NULL;
+ unsigned num_param = 0;
+ unsigned i, num_coords, num_derivs, num_offsets, layer;
+ enum lp_sampler_lod_control lod_control;
+ boolean need_cache = FALSE;
+
+ lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
+ LP_SAMPLER_LOD_CONTROL_SHIFT;
+
+ get_target_info(static_texture_state->target,
+ &num_coords, &num_derivs, &num_offsets, &layer);
+
+ /* a thread_data (cache) argument is only present for S3TC-layout
+ * formats - must mirror the check in lp_build_sample_soa_func() */
+ if (dynamic_state->cache_ptr) {
+ const struct util_format_description *format_desc;
+ format_desc = util_format_description(static_texture_state->format);
+ if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
+ need_cache = TRUE;
+ }
+ }
+
+ /* "unpack" arguments */
+ context_ptr = LLVMGetParam(function, num_param++);
+ if (need_cache) {
+ thread_data_ptr = LLVMGetParam(function, num_param++);
+ }
+ for (i = 0; i < num_coords; i++) {
+ coords[i] = LLVMGetParam(function, num_param++);
+ }
+ for (i = num_coords; i < 5; i++) {
+ /* This is rather unfortunate... */
+ coords[i] = lp_build_undef(gallivm, type);
+ }
+ if (layer) {
+ coords[layer] = LLVMGetParam(function, num_param++);
+ }
+ if (sample_key & LP_SAMPLER_SHADOW) {
+ coords[4] = LLVMGetParam(function, num_param++);
+ }
+ if (sample_key & LP_SAMPLER_OFFSETS) {
+ for (i = 0; i < num_offsets; i++) {
+ offsets[i] = LLVMGetParam(function, num_param++);
+ }
+ }
+ if (lod_control == LP_SAMPLER_LOD_BIAS ||
+ lod_control == LP_SAMPLER_LOD_EXPLICIT) {
+ lod = LLVMGetParam(function, num_param++);
+ }
+ else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
+ for (i = 0; i < num_derivs; i++) {
+ derivs.ddx[i] = LLVMGetParam(function, num_param++);
+ derivs.ddy[i] = LLVMGetParam(function, num_param++);
+ }
+ deriv_ptr = &derivs;
+ }
+
+ assert(num_args == num_param);
+
+ /*
+ * Function body
+ */
+
+ /* emit into a private builder so the caller's insert point is preserved;
+ * restored below after the body is built */
+ old_builder = gallivm->builder;
+ block = LLVMAppendBasicBlockInContext(gallivm->context, function, "entry");
+ gallivm->builder = LLVMCreateBuilderInContext(gallivm->context);
+ LLVMPositionBuilderAtEnd(gallivm->builder, block);
+
+ lp_build_sample_soa_code(gallivm,
+ static_texture_state,
+ static_sampler_state,
+ dynamic_state,
+ type,
+ sample_key,
+ texture_index,
+ sampler_index,
+ context_ptr,
+ thread_data_ptr,
+ coords,
+ offsets,
+ deriv_ptr,
+ lod,
+ texel_out);
+
+ /* return all four texel channels as one aggregate value */
+ LLVMBuildAggregateRet(gallivm->builder, texel_out, 4);
+
+ LLVMDisposeBuilder(gallivm->builder);
+ gallivm->builder = old_builder;
+
+ gallivm_verify_function(gallivm, function);
+}
+
+
+/**
+ * Call the matching function for texture sampling.
+ * If there's no match, generate a new one.
+ *
+ * Functions are keyed by name (texture unit, sampler unit and sample_key),
+ * looked up in the current module, generated on demand via
+ * lp_build_sample_gen_func(), and invoked with the fast calling convention.
+ * The four resulting texel channels are stored into params->texel.
+ */
+static void
+lp_build_sample_soa_func(struct gallivm_state *gallivm,
+ const struct lp_static_texture_state *static_texture_state,
+ const struct lp_static_sampler_state *static_sampler_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ const struct lp_sampler_params *params)
+{
+ LLVMBuilderRef builder = gallivm->builder;
+ LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(
+ LLVMGetInsertBlock(builder)));
+ LLVMValueRef function, inst;
+ LLVMValueRef args[LP_MAX_TEX_FUNC_ARGS];
+ LLVMBasicBlockRef bb;
+ LLVMValueRef tex_ret;
+ unsigned num_args = 0;
+ char func_name[64];
+ unsigned i, num_coords, num_derivs, num_offsets, layer;
+ unsigned texture_index = params->texture_index;
+ unsigned sampler_index = params->sampler_index;
+ unsigned sample_key = params->sample_key;
+ const LLVMValueRef *coords = params->coords;
+ const LLVMValueRef *offsets = params->offsets;
+ const struct lp_derivatives *derivs = params->derivs;
+ enum lp_sampler_lod_control lod_control;
+ boolean need_cache = FALSE;
+
+ lod_control = (sample_key & LP_SAMPLER_LOD_CONTROL_MASK) >>
+ LP_SAMPLER_LOD_CONTROL_SHIFT;
+
+ get_target_info(static_texture_state->target,
+ &num_coords, &num_derivs, &num_offsets, &layer);
+
+ /* only S3TC-layout formats take the extra thread_data (cache) argument */
+ if (dynamic_state->cache_ptr) {
+ const struct util_format_description *format_desc;
+ format_desc = util_format_description(static_texture_state->format);
+ if (format_desc && format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
+ /*
+ * This is not 100% correct, if we have cache but the
+ * util_format_s3tc_prefer is true the cache won't get used
+ * regardless (could hook up the block decode there...) */
+ need_cache = TRUE;
+ }
+ }
+ /*
+ * texture function matches are found by name.
+ * Thus the name has to include both the texture and sampler unit
+ * (which covers all static state) plus the actual texture function
+ * (including things like offsets, shadow coord, lod control).
+ * Additionally lod_property has to be included too.
+ */
+
+ util_snprintf(func_name, sizeof(func_name), "texfunc_res_%d_sam_%d_%x",
+ texture_index, sampler_index, sample_key);
+
+ function = LLVMGetNamedFunction(module, func_name);
+
+ if(!function) {
+ LLVMTypeRef arg_types[LP_MAX_TEX_FUNC_ARGS];
+ LLVMTypeRef ret_type;
+ LLVMTypeRef function_type;
+ LLVMTypeRef val_type[4];
+ unsigned num_param = 0;
+
+ /*
+ * Generate the function prototype.
+ */
+
+ arg_types[num_param++] = LLVMTypeOf(params->context_ptr);
+ if (need_cache) {
+ arg_types[num_param++] = LLVMTypeOf(params->thread_data_ptr);
+ }
+ for (i = 0; i < num_coords; i++) {
+ arg_types[num_param++] = LLVMTypeOf(coords[0]);
+ assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[i]));
+ }
+ if (layer) {
+ arg_types[num_param++] = LLVMTypeOf(coords[layer]);
+ assert(LLVMTypeOf(coords[0]) == LLVMTypeOf(coords[layer]));
+ }
+ if (sample_key & LP_SAMPLER_SHADOW) {
+ arg_types[num_param++] = LLVMTypeOf(coords[0]);
+ }
+ if (sample_key & LP_SAMPLER_OFFSETS) {
+ for (i = 0; i < num_offsets; i++) {
+ arg_types[num_param++] = LLVMTypeOf(offsets[0]);
+ assert(LLVMTypeOf(offsets[0]) == LLVMTypeOf(offsets[i]));
+ }
+ }
+ if (lod_control == LP_SAMPLER_LOD_BIAS ||
+ lod_control == LP_SAMPLER_LOD_EXPLICIT) {
+ arg_types[num_param++] = LLVMTypeOf(params->lod);
+ }
+ else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
+ for (i = 0; i < num_derivs; i++) {
+ arg_types[num_param++] = LLVMTypeOf(derivs->ddx[i]);
+ arg_types[num_param++] = LLVMTypeOf(derivs->ddy[i]);
+ assert(LLVMTypeOf(derivs->ddx[0]) == LLVMTypeOf(derivs->ddx[i]));
+ assert(LLVMTypeOf(derivs->ddy[0]) == LLVMTypeOf(derivs->ddy[i]));
+ }
+ }
+
+ /* return type is a struct of the four texel channel vectors */
+ val_type[0] = val_type[1] = val_type[2] = val_type[3] =
+ lp_build_vec_type(gallivm, params->type);
+ ret_type = LLVMStructTypeInContext(gallivm->context, val_type, 4, 0);
+ function_type = LLVMFunctionType(ret_type, arg_types, num_param, 0);
+ function = LLVMAddFunction(module, func_name, function_type);
+
+ /* mark all pointer parameters noalias */
+ for (i = 0; i < num_param; ++i) {
+ if(LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind) {
+ LLVMAddAttribute(LLVMGetParam(function, i), LLVMNoAliasAttribute);
+ }
+ }
+
+ LLVMSetFunctionCallConv(function, LLVMFastCallConv);
+ LLVMSetLinkage(function, LLVMPrivateLinkage);
+
+ lp_build_sample_gen_func(gallivm,
+ static_texture_state,
+ static_sampler_state,
+ dynamic_state,
+ params->type,
+ texture_index,
+ sampler_index,
+ function,
+ num_param,
+ sample_key);
+ }
+
+ /* assemble the call arguments in the same order as the prototype above */
+ num_args = 0;
+ args[num_args++] = params->context_ptr;
+ if (need_cache) {
+ args[num_args++] = params->thread_data_ptr;
+ }
+ for (i = 0; i < num_coords; i++) {
+ args[num_args++] = coords[i];
+ }
+ if (layer) {
+ args[num_args++] = coords[layer];
+ }
+ if (sample_key & LP_SAMPLER_SHADOW) {
+ args[num_args++] = coords[4];
+ }
+ if (sample_key & LP_SAMPLER_OFFSETS) {
+ for (i = 0; i < num_offsets; i++) {
+ args[num_args++] = offsets[i];
+ }
+ }
+ if (lod_control == LP_SAMPLER_LOD_BIAS ||
+ lod_control == LP_SAMPLER_LOD_EXPLICIT) {
+ args[num_args++] = params->lod;
+ }
+ else if (lod_control == LP_SAMPLER_LOD_DERIVATIVES) {
+ for (i = 0; i < num_derivs; i++) {
+ args[num_args++] = derivs->ddx[i];
+ args[num_args++] = derivs->ddy[i];
+ }
+ }
+
+ assert(num_args <= LP_MAX_TEX_FUNC_ARGS);
+
+ tex_ret = LLVMBuildCall(builder, function, args, num_args, "");
+ bb = LLVMGetInsertBlock(builder);
+ inst = LLVMGetLastInstruction(bb);
+ /* the call site must use the same calling convention as the callee */
+ LLVMSetInstructionCallConv(inst, LLVMFastCallConv);
+
+ for (i = 0; i < 4; i++) {
+ params->texel[i] = LLVMBuildExtractValue(gallivm->builder, tex_ret, i, "");
+ }
+}
+
+
+/**
+ * Build texture sampling code.
+ * Either via a function call or inline it directly.
+ *
+ * Results (the four texel channel vectors) are stored into params->texel.
+ */
+void
+lp_build_sample_soa(const struct lp_static_texture_state *static_texture_state,
+ const struct lp_static_sampler_state *static_sampler_state,
+ struct lp_sampler_dynamic_state *dynamic_state,
+ struct gallivm_state *gallivm,
+ const struct lp_sampler_params *params)
+{
+ boolean use_tex_func = FALSE;
+
+ /*
+ * Do not use a function call if the sampling is "simple enough".
+ * We define this by
+ * a) format
+ * b) no mips (either one level only or no mip filter)
+ * No mips will definitely make the code smaller, though
+ * the format requirement is a bit iffy - there's some (SoA) formats
+ * which definitely generate less code. This does happen to catch
+ * some important cases though which are hurt quite a bit by using
+ * a call (though not really because of the call overhead but because
+ * they are reusing the same texture unit with some of the same
+ * parameters).
+ * Ideally we'd let llvm recognize this stuff by doing IPO passes.
+ */
+
+ if (USE_TEX_FUNC_CALL) {
+ const struct util_format_description *format_desc;
+ boolean simple_format;
+ boolean simple_tex;
+ enum lp_sampler_op_type op_type;
+ /* "simple" format: nothing bound, or a plain rgba8 RGB variant */
+ format_desc = util_format_description(static_texture_state->format);
+ simple_format = !format_desc ||
+ (util_format_is_rgba8_variant(format_desc) &&
+ format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB);
+
+ /* "simple" tex op: not a regular texture op, or single-level
+ * sampling with identical min/mag filters */
+ op_type = (params->sample_key & LP_SAMPLER_OP_TYPE_MASK) >>
+ LP_SAMPLER_OP_TYPE_SHIFT;
+ simple_tex =
+ op_type != LP_SAMPLER_OP_TEXTURE ||
+ ((static_sampler_state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE ||
+ static_texture_state->level_zero_only == TRUE) &&
+ static_sampler_state->min_img_filter == static_sampler_state->mag_img_filter);
+
+ use_tex_func = format_desc && !(simple_format && simple_tex);
+ }
+
+ if (use_tex_func) {
+ lp_build_sample_soa_func(gallivm,
+ static_texture_state,
+ static_sampler_state,
+ dynamic_state,
+ params);
+ }
+ else {
+ lp_build_sample_soa_code(gallivm,
+ static_texture_state,
+ static_sampler_state,
+ dynamic_state,
+ params->type,
+ params->sample_key,
+ params->texture_index,
+ params->sampler_index,
+ params->context_ptr,
+ params->thread_data_ptr,
+ params->coords,
+ params->offsets,
+ params->derivs,
+ params->lod,
+ params->texel);
+ }
+}
+
+
void
lp_build_size_query_soa(struct gallivm_state *gallivm,
const struct lp_static_texture_state *static_state,