}
+/**
+ * Helper to compute the first coord and the weight for
+ * linear wrap repeat npot textures.
+ *
+ * Works in 8-bit fixed point: the wrapped coordinate is scaled by the
+ * texture size and by 256, then the low 8 bits become the lerp weight
+ * (*weight_i, in [0, 255]) and the remaining high bits the first texel
+ * index (*coord0_i). A final select wraps a negative index (sample
+ * within half a texel of the left edge) back to the last texel.
+ *
+ * coord_f   - normalized float texcoord (only its fractional part is used)
+ * length_i  - texture size as int vector
+ * length_f  - texture size as float vector (same value as length_i)
+ * coord0_i  - out: first texel coordinate
+ * weight_i  - out: 8-bit fixed-point lerp weight
+ */
+static void
+lp_build_coord_repeat_npot_linear_int(struct lp_build_sample_context *bld,
+ LLVMValueRef coord_f,
+ LLVMValueRef length_i,
+ LLVMValueRef length_f,
+ LLVMValueRef *coord0_i,
+ LLVMValueRef *weight_i)
+{
+ struct lp_build_context *coord_bld = &bld->coord_bld;
+ struct lp_build_context *int_coord_bld = &bld->int_coord_bld;
+ struct lp_build_context abs_coord_bld;
+ struct lp_type abs_type;
+ LLVMValueRef length_minus_one = lp_build_sub(int_coord_bld, length_i,
+ int_coord_bld->one);
+ LLVMValueRef mask, i32_c8, i32_c128, i32_c255;
+
+ /* wrap with normalized floats is just fract */
+ coord_f = lp_build_fract(coord_bld, coord_f);
+ /* mul by size */
+ coord_f = lp_build_mul(coord_bld, coord_f, length_f);
+ /* convert to int, compute lerp weight */
+ /* scale by 256 so the round below keeps 8 fractional bits */
+ coord_f = lp_build_mul_imm(&bld->coord_bld, coord_f, 256);
+
+ /* At this point we don't have any negative numbers so use non-signed
+ * build context which might help on some archs.
+ */
+ abs_type = coord_bld->type;
+ abs_type.sign = 0;
+ lp_build_context_init(&abs_coord_bld, bld->gallivm, abs_type);
+ *coord0_i = lp_build_iround(&abs_coord_bld, coord_f);
+
+ /* subtract 0.5 (add -128) to sample at texel centers */
+ i32_c128 = lp_build_const_int_vec(bld->gallivm, bld->int_coord_type, -128);
+ *coord0_i = LLVMBuildAdd(bld->gallivm->builder, *coord0_i, i32_c128, "");
+
+ /* compute fractional part (AND with 0xff) */
+ i32_c255 = lp_build_const_int_vec(bld->gallivm, bld->int_coord_type, 255);
+ *weight_i = LLVMBuildAnd(bld->gallivm->builder, *coord0_i, i32_c255, "");
+
+ /* compute floor (shift right 8) */
+ i32_c8 = lp_build_const_int_vec(bld->gallivm, bld->int_coord_type, 8);
+ *coord0_i = LLVMBuildAShr(bld->gallivm->builder, *coord0_i, i32_c8, "");
+ /*
+ * we avoided the 0.5/length division before the repeat wrap,
+ * now need to fix up edge cases with selects:
+ * the -128 bias above can push coord0 to -1, which under repeat
+ * wrapping must map to the last texel (length - 1).
+ */
+ mask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type,
+ PIPE_FUNC_LESS, *coord0_i, int_coord_bld->zero);
+ *coord0_i = lp_build_select(int_coord_bld, mask, length_minus_one, *coord0_i);
+}
+
+
/**
* Build LLVM code for texture coord wrapping, for linear filtering,
* for scaled integer texcoords.
}
else {
LLVMValueRef mask;
- LLVMValueRef weight;
LLVMValueRef length_f = lp_build_int_to_float(&bld->coord_bld, length);
if (offset) {
offset = lp_build_int_to_float(&bld->coord_bld, offset);
offset = lp_build_div(&bld->coord_bld, offset, length_f);
coord_f = lp_build_add(&bld->coord_bld, coord_f, offset);
}
- lp_build_coord_repeat_npot_linear(bld, coord_f,
- length, length_f,
- &coord0, &weight);
+ lp_build_coord_repeat_npot_linear_int(bld, coord_f,
+ length, length_f,
+ &coord0, weight_i);
mask = lp_build_compare(bld->gallivm, int_coord_bld->type,
PIPE_FUNC_NOTEQUAL, coord0, length_minus_one);
coord1 = LLVMBuildAnd(builder,
lp_build_add(int_coord_bld, coord0,
int_coord_bld->one),
mask, "");
- weight = lp_build_mul_imm(&bld->coord_bld, weight, 256);
- *weight_i = lp_build_itrunc(&bld->coord_bld, weight);
}
break;
coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, "");
}
else {
- LLVMValueRef weight;
LLVMValueRef length_f = lp_build_int_to_float(&bld->coord_bld, length);
if (offset) {
offset = lp_build_int_to_float(&bld->coord_bld, offset);
offset = lp_build_div(&bld->coord_bld, offset, length_f);
coord_f = lp_build_add(&bld->coord_bld, coord_f, offset);
}
- lp_build_coord_repeat_npot_linear(bld, coord_f,
- length, length_f,
- &coord0, &weight);
- weight = lp_build_mul_imm(&bld->coord_bld, weight, 256);
- *weight_i = lp_build_itrunc(&bld->coord_bld, weight);
+ lp_build_coord_repeat_npot_linear_int(bld, coord_f,
+ length, length_f,
+ &coord0, weight_i);
}
mask = lp_build_compare(bld->gallivm, int_coord_bld->type,
bld->texel_type.length,
bld->format_desc->block.bits,
bld->texel_type.width,
+ TRUE,
data_ptr, offset, TRUE);
rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, "");
rgba8 = lp_build_fetch_rgba_aos(bld->gallivm,
bld->format_desc,
u8n.type,
+ TRUE,
data_ptr, offset,
x_subcoord,
y_subcoord);
LLVMValueRef *colors)
{
const unsigned dims = bld->dims;
- LLVMBuilderRef builder = bld->gallivm->builder;
struct lp_build_context i32;
- LLVMTypeRef i32_vec_type;
- LLVMValueRef i32_c8;
LLVMValueRef width_vec, height_vec, depth_vec;
LLVMValueRef s_ipart, t_ipart = NULL, r_ipart = NULL;
LLVMValueRef s_float, t_float = NULL, r_float = NULL;
lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32, bld->vector_width));
- i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type);
-
lp_build_extract_image_sizes(bld,
&bld->int_size_bld,
bld->int_coord_type,
s_float = s; t_float = t; r_float = r;
if (bld->static_sampler_state->normalized_coords) {
- LLVMValueRef scaled_size;
LLVMValueRef flt_size;
- /* scale size by 256 (8 fractional bits) */
- scaled_size = lp_build_shl_imm(&bld->int_size_bld, int_size, 8);
-
- flt_size = lp_build_int_to_float(&bld->float_size_bld, scaled_size);
+ flt_size = lp_build_int_to_float(&bld->float_size_bld, int_size);
lp_build_unnormalized_coords(bld, flt_size, &s, &t, &r);
}
- else {
- /* scale coords by 256 (8 fractional bits) */
- s = lp_build_mul_imm(&bld->coord_bld, s, 256);
- if (dims >= 2)
- t = lp_build_mul_imm(&bld->coord_bld, t, 256);
- if (dims >= 3)
- r = lp_build_mul_imm(&bld->coord_bld, r, 256);
- }
/* convert float to int */
- s = LLVMBuildFPToSI(builder, s, i32_vec_type, "");
- if (dims >= 2)
- t = LLVMBuildFPToSI(builder, t, i32_vec_type, "");
- if (dims >= 3)
- r = LLVMBuildFPToSI(builder, r, i32_vec_type, "");
-
- /* compute floor (shift right 8) */
- i32_c8 = lp_build_const_int_vec(bld->gallivm, i32.type, 8);
- s_ipart = LLVMBuildAShr(builder, s, i32_c8, "");
+ /* For correct rounding, need floor, not truncation here.
+ * Note that in some cases (clamp to edge, no texel offsets) we
+ * could use a non-signed build context, which would greatly help
+ * archs that don't have native floor rounding.
+ */
+ s_ipart = lp_build_ifloor(&bld->coord_bld, s);
if (dims >= 2)
- t_ipart = LLVMBuildAShr(builder, t, i32_c8, "");
+ t_ipart = lp_build_ifloor(&bld->coord_bld, t);
if (dims >= 3)
- r_ipart = LLVMBuildAShr(builder, r, i32_c8, "");
+ r_ipart = lp_build_ifloor(&bld->coord_bld, r);
/* add texel offsets */
if (offsets[0]) {
offset = lp_build_add(&bld->int_coord_bld, offset, z_offset);
}
}
- if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
- bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
- bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
+ if (has_layer_coord(bld->static_texture_state->target)) {
LLVMValueRef z_offset;
/* The r coord is the cube face in [0,5] or array layer */
z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec);
&z_icoord);
}
}
- if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
- bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
- bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
+ if (has_layer_coord(bld->static_texture_state->target)) {
z_icoord = r;
}
bld->texel_type.length,
bld->format_desc->block.bits,
bld->texel_type.width,
+ TRUE,
data_ptr, offset[k][j][i], TRUE);
rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, "");
rgba8 = lp_build_fetch_rgba_aos(bld->gallivm,
bld->format_desc,
u8n.type,
+ TRUE,
data_ptr, offset[k][j][i],
x_subcoord[i],
y_subcoord[j]);
const unsigned dims = bld->dims;
LLVMBuilderRef builder = bld->gallivm->builder;
struct lp_build_context i32;
- LLVMTypeRef i32_vec_type;
LLVMValueRef i32_c8, i32_c128, i32_c255;
LLVMValueRef width_vec, height_vec, depth_vec;
LLVMValueRef s_ipart, s_fpart, s_float;
lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32, bld->vector_width));
- i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type);
-
lp_build_extract_image_sizes(bld,
&bld->int_size_bld,
bld->int_coord_type,
}
/* convert float to int */
- s = LLVMBuildFPToSI(builder, s, i32_vec_type, "");
+ /* For correct rounding, need round to nearest, not truncation here.
+ * Note that in some cases (clamp to edge, no texel offsets) we
+ * could use a non-signed build context, which would help archs
+ * that don't implement an fptosi intrinsic with round-to-nearest.
+ */
+ s = lp_build_iround(&bld->coord_bld, s);
if (dims >= 2)
- t = LLVMBuildFPToSI(builder, t, i32_vec_type, "");
+ t = lp_build_iround(&bld->coord_bld, t);
if (dims >= 3)
- r = LLVMBuildFPToSI(builder, r, i32_vec_type, "");
+ r = lp_build_iround(&bld->coord_bld, r);
/* subtract 0.5 (add -128) */
i32_c128 = lp_build_const_int_vec(bld->gallivm, i32.type, -128);
&x_subcoord[0], &x_subcoord[1]);
/* add potential cube/array/mip offsets now as they are constant per pixel */
- if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
- bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
- bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
+ if (has_layer_coord(bld->static_texture_state->target)) {
LLVMValueRef z_offset;
z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec);
/* The r coord is the cube face in [0,5] or array layer */
if (dims >= 3) {
lp_build_sample_wrap_linear_int(bld,
- bld->format_desc->block.height,
+ 1, /* block length (depth) */
r_ipart, &r_fpart, r_float,
depth_vec, z_stride, offsets[2],
bld->static_texture_state->pot_depth,
if (dims >= 3) {
lp_build_sample_wrap_linear_float(bld,
- bld->format_desc->block.height,
+ 1, /* block length (depth) */
r, depth_vec, offsets[2],
bld->static_texture_state->pot_depth,
bld->static_sampler_state->wrap_r,
&x_offset1, &x_subcoord[1]);
/* add potential cube/array/mip offsets now as they are constant per pixel */
- if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE ||
- bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY ||
- bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) {
+ if (has_layer_coord(bld->static_texture_state->target)) {
LLVMValueRef z_offset;
z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec);
/* The r coord is the cube face in [0,5] or array layer */
lp_build_mipmap_level_sizes(bld, ilevel0,
&size0,
&row_stride0_vec, &img_stride0_vec);
- if (bld->num_lods == 1) {
+ if (bld->num_mips == 1) {
data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0);
}
else {
if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
LLVMValueRef h16vec_scale = lp_build_const_vec(bld->gallivm,
- bld->perquadf_bld.type, 256.0);
- LLVMTypeRef i32vec_type = lp_build_vec_type(bld->gallivm, bld->perquadi_bld.type);
+ bld->lodf_bld.type, 256.0);
+ LLVMTypeRef i32vec_type = bld->lodi_bld.vec_type;
struct lp_build_if_state if_ctx;
LLVMValueRef need_lerp;
unsigned num_quads = bld->coord_bld.type.length / 4;
lod_fpart = LLVMBuildFPToSI(builder, lod_fpart, i32vec_type, "lod_fpart.fixed16");
/* need_lerp = lod_fpart > 0 */
- if (num_quads == 1) {
+ if (bld->num_lods == 1) {
need_lerp = LLVMBuildICmp(builder, LLVMIntSGT,
- lod_fpart, bld->perquadi_bld.zero,
+ lod_fpart, bld->lodi_bld.zero,
"need_lerp");
}
else {
* lod_fpart values have same sign.
* We can however then skip the greater than comparison.
*/
- lod_fpart = lp_build_max(&bld->perquadi_bld, lod_fpart,
- bld->perquadi_bld.zero);
- need_lerp = lp_build_any_true_range(&bld->perquadi_bld, num_quads, lod_fpart);
+ lod_fpart = lp_build_max(&bld->lodi_bld, lod_fpart,
+ bld->lodi_bld.zero);
+ need_lerp = lp_build_any_true_range(&bld->lodi_bld, bld->num_lods, lod_fpart);
}
lp_build_if(&if_ctx, bld->gallivm, need_lerp);
lp_build_mipmap_level_sizes(bld, ilevel1,
&size1,
&row_stride1_vec, &img_stride1_vec);
- lp_build_mipmap_level_sizes(bld, ilevel1,
- &size1,
- &row_stride1_vec, &img_stride1_vec);
- if (bld->num_lods == 1) {
+ if (bld->num_mips == 1) {
data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1);
}
else {
/* interpolate samples from the two mipmap levels */
- if (num_quads == 1) {
+ if (num_quads == 1 && bld->num_lods == 1) {
lod_fpart = LLVMBuildTrunc(builder, lod_fpart, u8n_bld.elem_type, "");
lod_fpart = lp_build_broadcast_scalar(&u8n_bld, lod_fpart);
-
-#if HAVE_LLVM == 0x208
- /* This was a work-around for a bug in LLVM 2.8.
- * Evidently, something goes wrong in the construction of the
- * lod_fpart short[8] vector. Adding this no-effect shuffle seems
- * to force the vector to be properly constructed.
- * Tested with mesa-demos/src/tests/mipmap_limits.c (press t, f).
- */
-#error Unsupported
-#endif
}
else {
- const unsigned num_chans_per_quad = 4 * 4;
- LLVMTypeRef tmp_vec_type = LLVMVectorType(u8n_bld.elem_type, bld->perquadi_bld.type.length);
+ unsigned num_chans_per_lod = 4 * bld->coord_type.length / bld->num_lods;
+ LLVMTypeRef tmp_vec_type = LLVMVectorType(u8n_bld.elem_type, bld->lodi_bld.type.length);
LLVMValueRef shuffle[LP_MAX_VECTOR_LENGTH];
/* Take the LSB of lod_fpart */
lod_fpart = LLVMBuildTrunc(builder, lod_fpart, tmp_vec_type, "");
/* Broadcast each lod weight into their respective channels */
- assert(u8n_bld.type.length == num_quads * num_chans_per_quad);
for (i = 0; i < u8n_bld.type.length; ++i) {
- shuffle[i] = lp_build_const_int32(bld->gallivm, i / num_chans_per_quad);
+ shuffle[i] = lp_build_const_int32(bld->gallivm, i / num_chans_per_lod);
}
lod_fpart = LLVMBuildShuffleVector(builder, lod_fpart, LLVMGetUndef(tmp_vec_type),
LLVMConstVector(shuffle, u8n_bld.type.length), "");
LLVMValueRef t,
LLVMValueRef r,
const LLVMValueRef *offsets,
- LLVMValueRef lod_ipart,
+ LLVMValueRef lod_positive,
LLVMValueRef lod_fpart,
LLVMValueRef ilevel0,
LLVMValueRef ilevel1,
LLVMValueRef texel_out[4])
{
- struct lp_build_context *int_bld = &bld->int_bld;
LLVMBuilderRef builder = bld->gallivm->builder;
const unsigned mip_filter = bld->static_sampler_state->min_mip_filter;
const unsigned min_filter = bld->static_sampler_state->min_img_filter;
* depending on the lod being > 0 or <= 0, respectively.
*/
struct lp_build_if_state if_ctx;
- LLVMValueRef minify;
/*
- * XXX this should to all lods into account, if some are min
- * some max probably could hack up the coords/weights in the linear
+ * FIXME this should take all lods into account; if some are min
+ * and some are max we could probably hack up the weights in the
+ * linear path with selects to work for nearest.
- * If that's just two quads sitting next to each other it seems
- * quite ok to do the same filtering method on both though, at
- * least unless we have explicit lod (and who uses different
- * min/mag filter with that?)
*/
if (bld->num_lods > 1)
- lod_ipart = LLVMBuildExtractElement(builder, lod_ipart,
- lp_build_const_int32(bld->gallivm, 0), "");
+ lod_positive = LLVMBuildExtractElement(builder, lod_positive,
+ lp_build_const_int32(bld->gallivm, 0), "");
- /* minify = lod >= 0.0 */
- minify = LLVMBuildICmp(builder, LLVMIntSGE,
- lod_ipart, int_bld->zero, "");
+ lod_positive = LLVMBuildTrunc(builder, lod_positive,
+ LLVMInt1TypeInContext(bld->gallivm->context), "");
- lp_build_if(&if_ctx, bld->gallivm, minify);
+ lp_build_if(&if_ctx, bld->gallivm, lod_positive);
{
/* Use the minification filter */
lp_build_sample_mipmap(bld,