X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;ds=sidebyside;f=src%2Fgallium%2Fauxiliary%2Fgallivm%2Flp_bld_sample_aos.c;h=bddff2c4a133145f3f36386267c5f86232784555;hb=cbf0f666311a5cb2720a4d6f4c540da1dd33e418;hp=d6831a580b3c8c6cc17e55bcfc19e3ebbb3fbebd;hpb=dbf3a15313eed930a3d8fdde12e457259c43651b;p=mesa.git diff --git a/src/gallium/auxiliary/gallivm/lp_bld_sample_aos.c b/src/gallium/auxiliary/gallivm/lp_bld_sample_aos.c index d6831a580b3..bddff2c4a13 100644 --- a/src/gallium/auxiliary/gallivm/lp_bld_sample_aos.c +++ b/src/gallium/auxiliary/gallivm/lp_bld_sample_aos.c @@ -27,7 +27,7 @@ /** * @file - * Texture sampling -- SoA. + * Texture sampling -- AoS. * * @author Jose Fonseca * @author Brian Paul @@ -40,6 +40,7 @@ #include "util/u_memory.h" #include "util/u_math.h" #include "util/u_format.h" +#include "util/u_cpu_detect.h" #include "lp_bld_debug.h" #include "lp_bld_type.h" #include "lp_bld_const.h" @@ -52,6 +53,7 @@ #include "lp_bld_flow.h" #include "lp_bld_gather.h" #include "lp_bld_format.h" +#include "lp_bld_init.h" #include "lp_bld_sample.h" #include "lp_bld_sample_aos.h" #include "lp_bld_quad.h" @@ -74,6 +76,7 @@ static void lp_build_sample_wrap_nearest_int(struct lp_build_sample_context *bld, unsigned block_length, LLVMValueRef coord, + LLVMValueRef coord_f, LLVMValueRef length, LLVMValueRef stride, boolean is_pot, @@ -82,6 +85,7 @@ lp_build_sample_wrap_nearest_int(struct lp_build_sample_context *bld, LLVMValueRef *out_i) { struct lp_build_context *int_coord_bld = &bld->int_coord_bld; + LLVMBuilderRef builder = bld->gallivm->builder; LLVMValueRef length_minus_one; length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one); @@ -89,12 +93,13 @@ lp_build_sample_wrap_nearest_int(struct lp_build_sample_context *bld, switch(wrap_mode) { case PIPE_TEX_WRAP_REPEAT: if(is_pot) - coord = LLVMBuildAnd(bld->builder, coord, length_minus_one, ""); + coord = LLVMBuildAnd(builder, coord, length_minus_one, ""); else { - /* Add a bias to the texcoord to handle negative coords */ - LLVMValueRef bias = lp_build_mul_imm(int_coord_bld, length, 1024); - coord = LLVMBuildAdd(bld->builder, coord, bias, ""); - coord = LLVMBuildURem(bld->builder, coord, length, ""); + struct lp_build_context *coord_bld = &bld->coord_bld; + LLVMValueRef length_f = lp_build_int_to_float(coord_bld, length); + coord = lp_build_fract_safe(coord_bld, coord_f); + coord = lp_build_mul(coord_bld, coord, length_f); + coord = lp_build_itrunc(coord_bld, coord); } break; @@ -118,6 +123,56 @@ lp_build_sample_wrap_nearest_int(struct lp_build_sample_context *bld, } +/** + * Build LLVM code for texture coord wrapping, for nearest filtering, + * for float texcoords. 
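+ * For example, for PIPE_TEX_WRAP_REPEAT this amounts (roughly) to
+ * computing icoord = itrunc(fract(coord) * length).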
+ * \param coord the incoming texcoord (s,t,r or q) + * \param length the texture size along one dimension + * \param is_pot if TRUE, length is a power of two + * \param wrap_mode one of PIPE_TEX_WRAP_x + * \param icoord the texcoord after wrapping, as int + */ +static void +lp_build_sample_wrap_nearest_float(struct lp_build_sample_context *bld, + LLVMValueRef coord, + LLVMValueRef length, + boolean is_pot, + unsigned wrap_mode, + LLVMValueRef *icoord) +{ + struct lp_build_context *coord_bld = &bld->coord_bld; + LLVMValueRef length_minus_one; + + switch(wrap_mode) { + case PIPE_TEX_WRAP_REPEAT: + /* take fraction, unnormalize */ + coord = lp_build_fract_safe(coord_bld, coord); + coord = lp_build_mul(coord_bld, coord, length); + *icoord = lp_build_itrunc(coord_bld, coord); + break; + case PIPE_TEX_WRAP_CLAMP_TO_EDGE: + length_minus_one = lp_build_sub(coord_bld, length, coord_bld->one); + if (bld->static_sampler_state->normalized_coords) { + /* scale coord to length */ + coord = lp_build_mul(coord_bld, coord, length); + } + coord = lp_build_clamp(coord_bld, coord, coord_bld->zero, + length_minus_one); + *icoord = lp_build_itrunc(coord_bld, coord); + break; + + case PIPE_TEX_WRAP_CLAMP: + case PIPE_TEX_WRAP_CLAMP_TO_BORDER: + case PIPE_TEX_WRAP_MIRROR_REPEAT: + case PIPE_TEX_WRAP_MIRROR_CLAMP: + case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: + case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: + default: + assert(0); + } +} + + /** * Build LLVM code for texture coord wrapping, for linear filtering, * for scaled integer texcoords. @@ -137,6 +192,8 @@ static void lp_build_sample_wrap_linear_int(struct lp_build_sample_context *bld, unsigned block_length, LLVMValueRef coord0, + LLVMValueRef *weight_i, + LLVMValueRef coord_f, LLVMValueRef length, LLVMValueRef stride, boolean is_pot, @@ -147,87 +204,120 @@ lp_build_sample_wrap_linear_int(struct lp_build_sample_context *bld, LLVMValueRef *i1) { struct lp_build_context *int_coord_bld = &bld->int_coord_bld; + LLVMBuilderRef builder = bld->gallivm->builder; LLVMValueRef length_minus_one; LLVMValueRef lmask, umask, mask; - if (block_length != 1) { - /* - * If the pixel block covers more than one pixel then there is no easy - * way to calculate offset1 relative to offset0. Instead, compute them - * independently. - */ - - LLVMValueRef coord1; - - lp_build_sample_wrap_nearest_int(bld, - block_length, - coord0, - length, - stride, - is_pot, - wrap_mode, - offset0, i0); - - coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one); + /* + * If the pixel block covers more than one pixel then there is no easy + * way to calculate offset1 relative to offset0. Instead, compute them + * independently. Otherwise, try to compute offset0 and offset1 with + * a single stride multiplication. 
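+ * For the scalar (block_length == 1) REPEAT case this is roughly
+ *    offset0 = coord0 * stride
+ *    offset1 = (offset0 + stride) & mask
+ * where mask is all ones unless coord0 == length - 1, so that
+ * offset1 wraps back to texel zero at the edge.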
+ */ - lp_build_sample_wrap_nearest_int(bld, - block_length, - coord1, - length, - stride, - is_pot, - wrap_mode, - offset1, i1); + length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one); + if (block_length != 1) { + LLVMValueRef coord1; + switch(wrap_mode) { + case PIPE_TEX_WRAP_REPEAT: + if (is_pot) { + coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one); + coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, ""); + coord1 = LLVMBuildAnd(builder, coord1, length_minus_one, ""); + } + else { + LLVMValueRef mask; + LLVMValueRef weight; + LLVMValueRef length_f = lp_build_int_to_float(&bld->coord_bld, length); + lp_build_coord_repeat_npot_linear(bld, coord_f, + length, length_f, + &coord0, &weight); + mask = lp_build_compare(bld->gallivm, int_coord_bld->type, + PIPE_FUNC_NOTEQUAL, coord0, length_minus_one); + coord1 = LLVMBuildAnd(builder, + lp_build_add(int_coord_bld, coord0, + int_coord_bld->one), + mask, ""); + weight = lp_build_mul_imm(&bld->coord_bld, weight, 256); + *weight_i = lp_build_itrunc(&bld->coord_bld, weight); + } + break; + + case PIPE_TEX_WRAP_CLAMP_TO_EDGE: + coord1 = lp_build_add(int_coord_bld, coord0, int_coord_bld->one); + coord0 = lp_build_clamp(int_coord_bld, coord0, int_coord_bld->zero, + length_minus_one); + coord1 = lp_build_clamp(int_coord_bld, coord1, int_coord_bld->zero, + length_minus_one); + break; + + case PIPE_TEX_WRAP_CLAMP: + case PIPE_TEX_WRAP_CLAMP_TO_BORDER: + case PIPE_TEX_WRAP_MIRROR_REPEAT: + case PIPE_TEX_WRAP_MIRROR_CLAMP: + case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: + case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: + default: + assert(0); + coord0 = int_coord_bld->zero; + coord1 = int_coord_bld->zero; + break; + } + lp_build_sample_partial_offset(int_coord_bld, block_length, coord0, stride, + offset0, i0); + lp_build_sample_partial_offset(int_coord_bld, block_length, coord1, stride, + offset1, i1); return; } - /* - * Scalar pixels -- try to compute offset0 and offset1 with a single stride - * multiplication. - */ - *i0 = int_coord_bld->zero; *i1 = int_coord_bld->zero; - length_minus_one = lp_build_sub(int_coord_bld, length, int_coord_bld->one); - switch(wrap_mode) { case PIPE_TEX_WRAP_REPEAT: if (is_pot) { - coord0 = LLVMBuildAnd(bld->builder, coord0, length_minus_one, ""); + coord0 = LLVMBuildAnd(builder, coord0, length_minus_one, ""); } else { - /* Add a bias to the texcoord to handle negative coords */ - LLVMValueRef bias = lp_build_mul_imm(int_coord_bld, length, 1024); - coord0 = LLVMBuildAdd(bld->builder, coord0, bias, ""); - coord0 = LLVMBuildURem(bld->builder, coord0, length, ""); + LLVMValueRef weight; + LLVMValueRef length_f = lp_build_int_to_float(&bld->coord_bld, length); + lp_build_coord_repeat_npot_linear(bld, coord_f, + length, length_f, + &coord0, &weight); + weight = lp_build_mul_imm(&bld->coord_bld, weight, 256); + *weight_i = lp_build_itrunc(&bld->coord_bld, weight); } - mask = lp_build_compare(bld->builder, int_coord_bld->type, + mask = lp_build_compare(bld->gallivm, int_coord_bld->type, PIPE_FUNC_NOTEQUAL, coord0, length_minus_one); *offset0 = lp_build_mul(int_coord_bld, coord0, stride); - *offset1 = LLVMBuildAnd(bld->builder, + *offset1 = LLVMBuildAnd(builder, lp_build_add(int_coord_bld, *offset0, stride), mask, ""); break; case PIPE_TEX_WRAP_CLAMP_TO_EDGE: - lmask = lp_build_compare(int_coord_bld->builder, int_coord_bld->type, + /* XXX this might be slower than the separate path + * on some newer cpus. With sse41 this is 8 instructions vs. 
7 + * - at least on SNB this is almost certainly slower since + * min/max are cheaper than selects, and the muls aren't bad. + */ + lmask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type, PIPE_FUNC_GEQUAL, coord0, int_coord_bld->zero); - umask = lp_build_compare(int_coord_bld->builder, int_coord_bld->type, + umask = lp_build_compare(int_coord_bld->gallivm, int_coord_bld->type, PIPE_FUNC_LESS, coord0, length_minus_one); coord0 = lp_build_select(int_coord_bld, lmask, coord0, int_coord_bld->zero); coord0 = lp_build_select(int_coord_bld, umask, coord0, length_minus_one); - mask = LLVMBuildAnd(bld->builder, lmask, umask, ""); + mask = LLVMBuildAnd(builder, lmask, umask, ""); *offset0 = lp_build_mul(int_coord_bld, coord0, stride); *offset1 = lp_build_add(int_coord_bld, *offset0, - LLVMBuildAnd(bld->builder, stride, mask, "")); + LLVMBuildAnd(builder, stride, mask, "")); break; case PIPE_TEX_WRAP_CLAMP: @@ -245,6 +335,176 @@ lp_build_sample_wrap_linear_int(struct lp_build_sample_context *bld, } +/** + * Build LLVM code for texture coord wrapping, for linear filtering, + * for float texcoords. + * \param block_length is the length of the pixel block along the + * coordinate axis + * \param coord the incoming texcoord (s,t,r or q) + * \param length the texture size along one dimension + * \param is_pot if TRUE, length is a power of two + * \param wrap_mode one of PIPE_TEX_WRAP_x + * \param coord0 the first texcoord after wrapping, as int + * \param coord1 the second texcoord after wrapping, as int + * \param weight the filter weight as int (0-255) + * \param force_nearest if this coord actually uses nearest filtering + */ +static void +lp_build_sample_wrap_linear_float(struct lp_build_sample_context *bld, + unsigned block_length, + LLVMValueRef coord, + LLVMValueRef length, + boolean is_pot, + unsigned wrap_mode, + LLVMValueRef *coord0, + LLVMValueRef *coord1, + LLVMValueRef *weight, + unsigned force_nearest) +{ + struct lp_build_context *int_coord_bld = &bld->int_coord_bld; + struct lp_build_context *coord_bld = &bld->coord_bld; + LLVMBuilderRef builder = bld->gallivm->builder; + LLVMValueRef half = lp_build_const_vec(bld->gallivm, coord_bld->type, 0.5); + LLVMValueRef length_minus_one = lp_build_sub(coord_bld, length, coord_bld->one); + + switch(wrap_mode) { + case PIPE_TEX_WRAP_REPEAT: + if (is_pot) { + /* mul by size and subtract 0.5 */ + coord = lp_build_mul(coord_bld, coord, length); + if (!force_nearest) + coord = lp_build_sub(coord_bld, coord, half); + *coord1 = lp_build_add(coord_bld, coord, coord_bld->one); + /* convert to int, compute lerp weight */ + lp_build_ifloor_fract(coord_bld, coord, coord0, weight); + *coord1 = lp_build_ifloor(coord_bld, *coord1); + /* repeat wrap */ + length_minus_one = lp_build_itrunc(coord_bld, length_minus_one); + *coord0 = LLVMBuildAnd(builder, *coord0, length_minus_one, ""); + *coord1 = LLVMBuildAnd(builder, *coord1, length_minus_one, ""); + } + else { + LLVMValueRef mask; + /* wrap with normalized floats is just fract */ + coord = lp_build_fract(coord_bld, coord); + /* unnormalize */ + coord = lp_build_mul(coord_bld, coord, length); + /* + * we avoided the 0.5/length division, have to fix up wrong + * edge cases with selects + */ + *coord1 = lp_build_add(coord_bld, coord, half); + coord = lp_build_sub(coord_bld, coord, half); + *weight = lp_build_fract(coord_bld, coord); + mask = lp_build_compare(coord_bld->gallivm, coord_bld->type, + PIPE_FUNC_LESS, coord, coord_bld->zero); + *coord0 = lp_build_select(coord_bld, mask, length_minus_one, 
coord); + *coord0 = lp_build_itrunc(coord_bld, *coord0); + mask = lp_build_compare(coord_bld->gallivm, coord_bld->type, + PIPE_FUNC_LESS, *coord1, length); + *coord1 = lp_build_select(coord_bld, mask, *coord1, coord_bld->zero); + *coord1 = lp_build_itrunc(coord_bld, *coord1); + } + break; + case PIPE_TEX_WRAP_CLAMP_TO_EDGE: + if (bld->static_sampler_state->normalized_coords) { + /* mul by tex size */ + coord = lp_build_mul(coord_bld, coord, length); + } + /* subtract 0.5 */ + if (!force_nearest) { + coord = lp_build_sub(coord_bld, coord, half); + } + /* clamp to [0, length - 1] */ + coord = lp_build_min(coord_bld, coord, length_minus_one); + coord = lp_build_max(coord_bld, coord, coord_bld->zero); + *coord1 = lp_build_add(coord_bld, coord, coord_bld->one); + /* convert to int, compute lerp weight */ + lp_build_ifloor_fract(coord_bld, coord, coord0, weight); + /* coord1 = min(coord1, length-1) */ + *coord1 = lp_build_min(coord_bld, *coord1, length_minus_one); + *coord1 = lp_build_itrunc(coord_bld, *coord1); + break; + default: + assert(0); + *coord0 = int_coord_bld->zero; + *coord1 = int_coord_bld->zero; + *weight = coord_bld->zero; + break; + } + *weight = lp_build_mul_imm(coord_bld, *weight, 256); + *weight = lp_build_itrunc(coord_bld, *weight); + return; +} + + +/** + * Fetch texels for image with nearest sampling. + * Return filtered color as two vectors of 16-bit fixed point values. + */ +static void +lp_build_sample_fetch_image_nearest(struct lp_build_sample_context *bld, + LLVMValueRef data_ptr, + LLVMValueRef offset, + LLVMValueRef x_subcoord, + LLVMValueRef y_subcoord, + LLVMValueRef *colors_lo, + LLVMValueRef *colors_hi) +{ + /* + * Fetch the pixels as 4 x 32bit (rgba order might differ): + * + * rgba0 rgba1 rgba2 rgba3 + * + * bit cast them into 16 x u8 + * + * r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3 + * + * unpack them into two 8 x i16: + * + * r0 g0 b0 a0 r1 g1 b1 a1 + * r2 g2 b2 a2 r3 g3 b3 a3 + * + * The higher 8 bits of the resulting elements will be zero. + */ + LLVMBuilderRef builder = bld->gallivm->builder; + LLVMValueRef rgba8; + struct lp_build_context h16, u8n; + LLVMTypeRef u8n_vec_type; + + lp_build_context_init(&h16, bld->gallivm, lp_type_ufixed(16, bld->vector_width)); + lp_build_context_init(&u8n, bld->gallivm, lp_type_unorm(8, bld->vector_width)); + u8n_vec_type = lp_build_vec_type(bld->gallivm, u8n.type); + + if (util_format_is_rgba8_variant(bld->format_desc)) { + /* + * Given the format is a rgba8, just read the pixels as is, + * without any swizzling. Swizzling will be done later. + */ + rgba8 = lp_build_gather(bld->gallivm, + bld->texel_type.length, + bld->format_desc->block.bits, + bld->texel_type.width, + data_ptr, offset); + + rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, ""); + } + else { + rgba8 = lp_build_fetch_rgba_aos(bld->gallivm, + bld->format_desc, + u8n.type, + data_ptr, offset, + x_subcoord, + y_subcoord); + } + + /* Expand one 4*rgba8 to two 2*rgba16 */ + lp_build_unpack2(bld->gallivm, u8n.type, h16.type, + rgba8, + colors_lo, colors_hi); +} + + /** * Sample a single texture image with nearest sampling. * If sampling a cube texture, r = cube face in [0,5]. 
@@ -256,6 +516,7 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld, LLVMValueRef row_stride_vec, LLVMValueRef img_stride_vec, LLVMValueRef data_ptr, + LLVMValueRef mipoffsets, LLVMValueRef s, LLVMValueRef t, LLVMValueRef r, @@ -263,33 +524,32 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld, LLVMValueRef *colors_hi) { const unsigned dims = bld->dims; - LLVMBuilderRef builder = bld->builder; - struct lp_build_context i32, h16, u8n; - LLVMTypeRef i32_vec_type, h16_vec_type, u8n_vec_type; + LLVMBuilderRef builder = bld->gallivm->builder; + struct lp_build_context i32; + LLVMTypeRef i32_vec_type; LLVMValueRef i32_c8; LLVMValueRef width_vec, height_vec, depth_vec; LLVMValueRef s_ipart, t_ipart = NULL, r_ipart = NULL; + LLVMValueRef s_float, t_float = NULL, r_float = NULL; LLVMValueRef x_stride; LLVMValueRef x_offset, offset; LLVMValueRef x_subcoord, y_subcoord, z_subcoord; - lp_build_context_init(&i32, builder, lp_type_int_vec(32)); - lp_build_context_init(&h16, builder, lp_type_ufixed(16)); - lp_build_context_init(&u8n, builder, lp_type_unorm(8)); + lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32, bld->vector_width)); - i32_vec_type = lp_build_vec_type(i32.type); - h16_vec_type = lp_build_vec_type(h16.type); - u8n_vec_type = lp_build_vec_type(u8n.type); + i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type); lp_build_extract_image_sizes(bld, - bld->int_size_type, + &bld->int_size_bld, bld->int_coord_type, int_size, &width_vec, &height_vec, &depth_vec); - if (bld->static_state->normalized_coords) { + s_float = s; t_float = t; r_float = r; + + if (bld->static_sampler_state->normalized_coords) { LLVMValueRef scaled_size; LLVMValueRef flt_size; @@ -317,7 +577,7 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld, r = LLVMBuildFPToSI(builder, r, i32_vec_type, ""); /* compute floor (shift right 8) */ - i32_c8 = lp_build_const_int_vec(i32.type, 8); + i32_c8 = lp_build_const_int_vec(bld->gallivm, i32.type, 8); s_ipart = LLVMBuildAShr(builder, s, i32_c8, ""); if (dims >= 2) t_ipart = LLVMBuildAShr(builder, t, i32_c8, ""); @@ -325,43 +585,248 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld, r_ipart = LLVMBuildAShr(builder, r, i32_c8, ""); /* get pixel, row, image strides */ - x_stride = lp_build_const_vec(bld->int_coord_bld.type, + x_stride = lp_build_const_vec(bld->gallivm, + bld->int_coord_bld.type, bld->format_desc->block.bits/8); /* Do texcoord wrapping, compute texel offset */ lp_build_sample_wrap_nearest_int(bld, bld->format_desc->block.width, - s_ipart, width_vec, x_stride, - bld->static_state->pot_width, - bld->static_state->wrap_s, + s_ipart, s_float, + width_vec, x_stride, + bld->static_texture_state->pot_width, + bld->static_sampler_state->wrap_s, &x_offset, &x_subcoord); offset = x_offset; if (dims >= 2) { LLVMValueRef y_offset; lp_build_sample_wrap_nearest_int(bld, bld->format_desc->block.height, - t_ipart, height_vec, row_stride_vec, - bld->static_state->pot_height, - bld->static_state->wrap_t, + t_ipart, t_float, + height_vec, row_stride_vec, + bld->static_texture_state->pot_height, + bld->static_sampler_state->wrap_t, &y_offset, &y_subcoord); offset = lp_build_add(&bld->int_coord_bld, offset, y_offset); if (dims >= 3) { LLVMValueRef z_offset; lp_build_sample_wrap_nearest_int(bld, 1, /* block length (depth) */ - r_ipart, depth_vec, img_stride_vec, - bld->static_state->pot_height, - bld->static_state->wrap_r, + r_ipart, r_float, + depth_vec, img_stride_vec, + bld->static_texture_state->pot_depth, 
+ bld->static_sampler_state->wrap_r, &z_offset, &z_subcoord); offset = lp_build_add(&bld->int_coord_bld, offset, z_offset); } - else if (bld->static_state->target == PIPE_TEXTURE_CUBE) { - LLVMValueRef z_offset; - /* The r coord is the cube face in [0,5] */ - z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec); - offset = lp_build_add(&bld->int_coord_bld, offset, z_offset); + } + if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE || + bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY || + bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) { + LLVMValueRef z_offset; + /* The r coord is the cube face in [0,5] or array layer */ + z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec); + offset = lp_build_add(&bld->int_coord_bld, offset, z_offset); + } + if (mipoffsets) { + offset = lp_build_add(&bld->int_coord_bld, offset, mipoffsets); + } + + lp_build_sample_fetch_image_nearest(bld, data_ptr, offset, + x_subcoord, y_subcoord, + colors_lo, colors_hi); +} + + +/** + * Sample a single texture image with nearest sampling. + * If sampling a cube texture, r = cube face in [0,5]. + * Return filtered color as two vectors of 16-bit fixed point values. + * Does address calcs (except offsets) with floats. + * Useful for AVX which has support for 8x32 floats but not 8x32 ints. + */ +static void +lp_build_sample_image_nearest_afloat(struct lp_build_sample_context *bld, + LLVMValueRef int_size, + LLVMValueRef row_stride_vec, + LLVMValueRef img_stride_vec, + LLVMValueRef data_ptr, + LLVMValueRef mipoffsets, + LLVMValueRef s, + LLVMValueRef t, + LLVMValueRef r, + LLVMValueRef *colors_lo, + LLVMValueRef *colors_hi) + { + const unsigned dims = bld->dims; + LLVMValueRef width_vec, height_vec, depth_vec; + LLVMValueRef offset; + LLVMValueRef x_subcoord, y_subcoord; + LLVMValueRef x_icoord = NULL, y_icoord = NULL, z_icoord = NULL; + LLVMValueRef flt_size; + + flt_size = lp_build_int_to_float(&bld->float_size_bld, int_size); + + lp_build_extract_image_sizes(bld, + &bld->float_size_bld, + bld->coord_type, + flt_size, + &width_vec, + &height_vec, + &depth_vec); + + /* Do texcoord wrapping */ + lp_build_sample_wrap_nearest_float(bld, + s, width_vec, + bld->static_texture_state->pot_width, + bld->static_sampler_state->wrap_s, + &x_icoord); + + if (dims >= 2) { + lp_build_sample_wrap_nearest_float(bld, + t, height_vec, + bld->static_texture_state->pot_height, + bld->static_sampler_state->wrap_t, + &y_icoord); + + if (dims >= 3) { + lp_build_sample_wrap_nearest_float(bld, + r, depth_vec, + bld->static_texture_state->pot_depth, + bld->static_sampler_state->wrap_r, + &z_icoord); } } + if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE || + bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY || + bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) { + z_icoord = r; + } + + /* + * From here on we deal with ints, and we should split up the 256bit + * vectors manually for better generated code. + */ + + /* + * compute texel offsets - + * cannot do offset calc with floats, difficult for block-based formats, + * and not enough precision anyway. 
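+ * (A 32-bit float has only a 24-bit mantissa, so byte offsets into
+ * large textures would not be exact.)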
+ */ + lp_build_sample_offset(&bld->int_coord_bld, + bld->format_desc, + x_icoord, y_icoord, + z_icoord, + row_stride_vec, img_stride_vec, + &offset, + &x_subcoord, &y_subcoord); + if (mipoffsets) { + offset = lp_build_add(&bld->int_coord_bld, offset, mipoffsets); + } + + lp_build_sample_fetch_image_nearest(bld, data_ptr, offset, + x_subcoord, y_subcoord, + colors_lo, colors_hi); +} + + +/** + * Fetch texels for image with linear sampling. + * Return filtered color as two vectors of 16-bit fixed point values. + */ +static void +lp_build_sample_fetch_image_linear(struct lp_build_sample_context *bld, + LLVMValueRef data_ptr, + LLVMValueRef offset[2][2][2], + LLVMValueRef x_subcoord[2], + LLVMValueRef y_subcoord[2], + LLVMValueRef s_fpart, + LLVMValueRef t_fpart, + LLVMValueRef r_fpart, + LLVMValueRef *colors_lo, + LLVMValueRef *colors_hi) +{ + const unsigned dims = bld->dims; + LLVMBuilderRef builder = bld->gallivm->builder; + struct lp_build_context h16, u8n; + LLVMTypeRef h16_vec_type, u8n_vec_type; + LLVMTypeRef elem_type = LLVMInt32TypeInContext(bld->gallivm->context); + LLVMValueRef shuffles_lo[LP_MAX_VECTOR_LENGTH]; + LLVMValueRef shuffles_hi[LP_MAX_VECTOR_LENGTH]; + LLVMValueRef shuffle_lo, shuffle_hi; + LLVMValueRef s_fpart_lo, s_fpart_hi; + LLVMValueRef t_fpart_lo = NULL, t_fpart_hi = NULL; + LLVMValueRef r_fpart_lo = NULL, r_fpart_hi = NULL; + LLVMValueRef neighbors_lo[2][2][2]; /* [z][y][x] */ + LLVMValueRef neighbors_hi[2][2][2]; /* [z][y][x] */ + LLVMValueRef packed_lo, packed_hi; + unsigned i, j, k; + unsigned numj, numk; + + lp_build_context_init(&h16, bld->gallivm, lp_type_ufixed(16, bld->vector_width)); + lp_build_context_init(&u8n, bld->gallivm, lp_type_unorm(8, bld->vector_width)); + h16_vec_type = lp_build_vec_type(bld->gallivm, h16.type); + u8n_vec_type = lp_build_vec_type(bld->gallivm, u8n.type); + + /* + * Transform 4 x i32 in + * + * s_fpart = {s0, s1, s2, s3} + * + * into 8 x i16 + * + * s_fpart = {00, s0, 00, s1, 00, s2, 00, s3} + * + * into two 8 x i16 + * + * s_fpart_lo = {s0, s0, s0, s0, s1, s1, s1, s1} + * s_fpart_hi = {s2, s2, s2, s2, s3, s3, s3, s3} + * + * and likewise for t_fpart. There is no risk of loosing precision here + * since the fractional parts only use the lower 8bits. 
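+ * The two shuffles below then replicate each 16-bit weight four
+ * times, once per color channel.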
+ */ + s_fpart = LLVMBuildBitCast(builder, s_fpart, h16_vec_type, ""); + if (dims >= 2) + t_fpart = LLVMBuildBitCast(builder, t_fpart, h16_vec_type, ""); + if (dims >= 3) + r_fpart = LLVMBuildBitCast(builder, r_fpart, h16_vec_type, ""); + + for (j = 0; j < h16.type.length; j += 4) { +#ifdef PIPE_ARCH_LITTLE_ENDIAN + unsigned subindex = 0; +#else + unsigned subindex = 1; +#endif + LLVMValueRef index; + + index = LLVMConstInt(elem_type, j/2 + subindex, 0); + for (i = 0; i < 4; ++i) + shuffles_lo[j + i] = index; + + index = LLVMConstInt(elem_type, h16.type.length/2 + j/2 + subindex, 0); + for (i = 0; i < 4; ++i) + shuffles_hi[j + i] = index; + } + + shuffle_lo = LLVMConstVector(shuffles_lo, h16.type.length); + shuffle_hi = LLVMConstVector(shuffles_hi, h16.type.length); + + s_fpart_lo = LLVMBuildShuffleVector(builder, s_fpart, h16.undef, + shuffle_lo, ""); + s_fpart_hi = LLVMBuildShuffleVector(builder, s_fpart, h16.undef, + shuffle_hi, ""); + if (dims >= 2) { + t_fpart_lo = LLVMBuildShuffleVector(builder, t_fpart, h16.undef, + shuffle_lo, ""); + t_fpart_hi = LLVMBuildShuffleVector(builder, t_fpart, h16.undef, + shuffle_hi, ""); + } + if (dims >= 3) { + r_fpart_lo = LLVMBuildShuffleVector(builder, r_fpart, h16.undef, + shuffle_lo, ""); + r_fpart_hi = LLVMBuildShuffleVector(builder, r_fpart, h16.undef, + shuffle_hi, ""); + } /* * Fetch the pixels as 4 x 32bit (rgba order might differ): @@ -379,38 +844,129 @@ lp_build_sample_image_nearest(struct lp_build_sample_context *bld, * * The higher 8 bits of the resulting elements will be zero. */ - { - LLVMValueRef rgba8; + numj = 1 + (dims >= 2); + numk = 1 + (dims >= 3); - if (util_format_is_rgba8_variant(bld->format_desc)) { - /* - * Given the format is a rgba8, just read the pixels as is, - * without any swizzling. Swizzling will be done later. - */ - rgba8 = lp_build_gather(bld->builder, - bld->texel_type.length, - bld->format_desc->block.bits, - bld->texel_type.width, - data_ptr, offset); + for (k = 0; k < numk; k++) { + for (j = 0; j < numj; j++) { + for (i = 0; i < 2; i++) { + LLVMValueRef rgba8; + + if (util_format_is_rgba8_variant(bld->format_desc)) { + /* + * Given the format is a rgba8, just read the pixels as is, + * without any swizzling. Swizzling will be done later. + */ + rgba8 = lp_build_gather(bld->gallivm, + bld->texel_type.length, + bld->format_desc->block.bits, + bld->texel_type.width, + data_ptr, offset[k][j][i]); + + rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, ""); + } + else { + rgba8 = lp_build_fetch_rgba_aos(bld->gallivm, + bld->format_desc, + u8n.type, + data_ptr, offset[k][j][i], + x_subcoord[i], + y_subcoord[j]); + } - rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, ""); + /* Expand one 4*rgba8 to two 2*rgba16 */ + lp_build_unpack2(bld->gallivm, u8n.type, h16.type, + rgba8, + &neighbors_lo[k][j][i], &neighbors_hi[k][j][i]); + } + } + } + + /* + * Linear interpolation with 8.8 fixed point. 
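+ * As a rough scalar model, each channel is blended as
+ *    dst = a + (((b - a) * weight) >> 8)
+ * with the weights in [0, 256].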
+ */ + if (bld->static_sampler_state->force_nearest_s) { + /* special case 1-D lerp */ + packed_lo = lp_build_lerp(&h16, + t_fpart_lo, + neighbors_lo[0][0][0], + neighbors_lo[0][0][1]); + + packed_hi = lp_build_lerp(&h16, + t_fpart_hi, + neighbors_hi[0][1][0], + neighbors_hi[0][1][0]); + } + else if (bld->static_sampler_state->force_nearest_t) { + /* special case 1-D lerp */ + packed_lo = lp_build_lerp(&h16, + s_fpart_lo, + neighbors_lo[0][0][0], + neighbors_lo[0][0][1]); + + packed_hi = lp_build_lerp(&h16, + s_fpart_hi, + neighbors_hi[0][0][0], + neighbors_hi[0][0][1]); + } + else { + /* general 1/2/3-D lerping */ + if (dims == 1) { + packed_lo = lp_build_lerp(&h16, + s_fpart_lo, + neighbors_lo[0][0][0], + neighbors_lo[0][0][1]); + + packed_hi = lp_build_lerp(&h16, + s_fpart_hi, + neighbors_hi[0][0][0], + neighbors_hi[0][0][1]); } else { - rgba8 = lp_build_fetch_rgba_aos(bld->builder, - bld->format_desc, - u8n.type, - data_ptr, offset, - x_subcoord, - y_subcoord); + /* 2-D lerp */ + packed_lo = lp_build_lerp_2d(&h16, + s_fpart_lo, t_fpart_lo, + neighbors_lo[0][0][0], + neighbors_lo[0][0][1], + neighbors_lo[0][1][0], + neighbors_lo[0][1][1]); + + packed_hi = lp_build_lerp_2d(&h16, + s_fpart_hi, t_fpart_hi, + neighbors_hi[0][0][0], + neighbors_hi[0][0][1], + neighbors_hi[0][1][0], + neighbors_hi[0][1][1]); + + if (dims >= 3) { + LLVMValueRef packed_lo2, packed_hi2; + + /* lerp in the second z slice */ + packed_lo2 = lp_build_lerp_2d(&h16, + s_fpart_lo, t_fpart_lo, + neighbors_lo[1][0][0], + neighbors_lo[1][0][1], + neighbors_lo[1][1][0], + neighbors_lo[1][1][1]); + + packed_hi2 = lp_build_lerp_2d(&h16, + s_fpart_hi, t_fpart_hi, + neighbors_hi[1][0][0], + neighbors_hi[1][0][1], + neighbors_hi[1][1][0], + neighbors_hi[1][1][1]); + /* interp between two z slices */ + packed_lo = lp_build_lerp(&h16, r_fpart_lo, + packed_lo, packed_lo2); + packed_hi = lp_build_lerp(&h16, r_fpart_hi, + packed_hi, packed_hi2); + } } - - /* Expand one 4*rgba8 to two 2*rgba16 */ - lp_build_unpack2(builder, u8n.type, h16.type, - rgba8, - colors_lo, colors_hi); } -} + *colors_lo = packed_lo; + *colors_hi = packed_hi; +} /** * Sample a single texture image with (bi-)(tri-)linear sampling. 
@@ -422,6 +978,7 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, LLVMValueRef row_stride_vec, LLVMValueRef img_stride_vec, LLVMValueRef data_ptr, + LLVMValueRef mipoffsets, LLVMValueRef s, LLVMValueRef t, LLVMValueRef r, @@ -429,44 +986,37 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, LLVMValueRef *colors_hi) { const unsigned dims = bld->dims; - LLVMBuilderRef builder = bld->builder; - struct lp_build_context i32, h16, u8n; - LLVMTypeRef i32_vec_type, h16_vec_type, u8n_vec_type; + LLVMBuilderRef builder = bld->gallivm->builder; + struct lp_build_context i32; + LLVMTypeRef i32_vec_type; LLVMValueRef i32_c8, i32_c128, i32_c255; LLVMValueRef width_vec, height_vec, depth_vec; - LLVMValueRef s_ipart, s_fpart, s_fpart_lo, s_fpart_hi; - LLVMValueRef t_ipart = NULL, t_fpart = NULL, t_fpart_lo = NULL, t_fpart_hi = NULL; - LLVMValueRef r_ipart = NULL, r_fpart = NULL, r_fpart_lo = NULL, r_fpart_hi = NULL; + LLVMValueRef s_ipart, s_fpart, s_float; + LLVMValueRef t_ipart = NULL, t_fpart = NULL, t_float = NULL; + LLVMValueRef r_ipart = NULL, r_fpart = NULL, r_float = NULL; LLVMValueRef x_stride, y_stride, z_stride; LLVMValueRef x_offset0, x_offset1; LLVMValueRef y_offset0, y_offset1; LLVMValueRef z_offset0, z_offset1; LLVMValueRef offset[2][2][2]; /* [z][y][x] */ LLVMValueRef x_subcoord[2], y_subcoord[2], z_subcoord[2]; - LLVMValueRef neighbors_lo[2][2][2]; /* [z][y][x] */ - LLVMValueRef neighbors_hi[2][2][2]; /* [z][y][x] */ - LLVMValueRef packed_lo, packed_hi; unsigned x, y, z; - unsigned i, j, k; - unsigned numj, numk; - lp_build_context_init(&i32, builder, lp_type_int_vec(32)); - lp_build_context_init(&h16, builder, lp_type_ufixed(16)); - lp_build_context_init(&u8n, builder, lp_type_unorm(8)); + lp_build_context_init(&i32, bld->gallivm, lp_type_int_vec(32, bld->vector_width)); - i32_vec_type = lp_build_vec_type(i32.type); - h16_vec_type = lp_build_vec_type(h16.type); - u8n_vec_type = lp_build_vec_type(u8n.type); + i32_vec_type = lp_build_vec_type(bld->gallivm, i32.type); lp_build_extract_image_sizes(bld, - bld->int_size_type, + &bld->int_size_bld, bld->int_coord_type, int_size, &width_vec, &height_vec, &depth_vec); - if (bld->static_state->normalized_coords) { + s_float = s; t_float = t; r_float = r; + + if (bld->static_sampler_state->normalized_coords) { LLVMValueRef scaled_size; LLVMValueRef flt_size; @@ -494,9 +1044,11 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, r = LLVMBuildFPToSI(builder, r, i32_vec_type, ""); /* subtract 0.5 (add -128) */ - i32_c128 = lp_build_const_int_vec(i32.type, -128); - s = LLVMBuildAdd(builder, s, i32_c128, ""); - if (dims >= 2) { + i32_c128 = lp_build_const_int_vec(bld->gallivm, i32.type, -128); + if (!bld->static_sampler_state->force_nearest_s) { + s = LLVMBuildAdd(builder, s, i32_c128, ""); + } + if (dims >= 2 && !bld->static_sampler_state->force_nearest_t) { t = LLVMBuildAdd(builder, t, i32_c128, ""); } if (dims >= 3) { @@ -504,7 +1056,7 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, } /* compute floor (shift right 8) */ - i32_c8 = lp_build_const_int_vec(i32.type, 8); + i32_c8 = lp_build_const_int_vec(bld->gallivm, i32.type, 8); s_ipart = LLVMBuildAShr(builder, s, i32_c8, ""); if (dims >= 2) t_ipart = LLVMBuildAShr(builder, t, i32_c8, ""); @@ -512,7 +1064,7 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, r_ipart = LLVMBuildAShr(builder, r, i32_c8, ""); /* compute fractional part (AND with 0xff) */ - i32_c255 = lp_build_const_int_vec(i32.type, 255); + 
i32_c255 = lp_build_const_int_vec(bld->gallivm, i32.type, 255); s_fpart = LLVMBuildAnd(builder, s, i32_c255, ""); if (dims >= 2) t_fpart = LLVMBuildAnd(builder, t, i32_c255, ""); @@ -520,7 +1072,7 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, r_fpart = LLVMBuildAnd(builder, r, i32_c255, ""); /* get pixel, row and image strides */ - x_stride = lp_build_const_vec(bld->int_coord_bld.type, + x_stride = lp_build_const_vec(bld->gallivm, bld->int_coord_bld.type, bld->format_desc->block.bits/8); y_stride = row_stride_vec; z_stride = img_stride_vec; @@ -528,11 +1080,28 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, /* do texcoord wrapping and compute texel offsets */ lp_build_sample_wrap_linear_int(bld, bld->format_desc->block.width, - s_ipart, width_vec, x_stride, - bld->static_state->pot_width, - bld->static_state->wrap_s, + s_ipart, &s_fpart, s_float, + width_vec, x_stride, + bld->static_texture_state->pot_width, + bld->static_sampler_state->wrap_s, &x_offset0, &x_offset1, &x_subcoord[0], &x_subcoord[1]); + + /* add potential cube/array/mip offsets now as they are constant per pixel */ + if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE || + bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY || + bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) { + LLVMValueRef z_offset; + z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec); + /* The r coord is the cube face in [0,5] or array layer */ + x_offset0 = lp_build_add(&bld->int_coord_bld, x_offset0, z_offset); + x_offset1 = lp_build_add(&bld->int_coord_bld, x_offset1, z_offset); + } + if (mipoffsets) { + x_offset0 = lp_build_add(&bld->int_coord_bld, x_offset0, mipoffsets); + x_offset1 = lp_build_add(&bld->int_coord_bld, x_offset1, mipoffsets); + } + for (z = 0; z < 2; z++) { for (y = 0; y < 2; y++) { offset[z][y][0] = x_offset0; @@ -543,9 +1112,10 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, if (dims >= 2) { lp_build_sample_wrap_linear_int(bld, bld->format_desc->block.height, - t_ipart, height_vec, y_stride, - bld->static_state->pot_height, - bld->static_state->wrap_t, + t_ipart, &t_fpart, t_float, + height_vec, y_stride, + bld->static_texture_state->pot_height, + bld->static_sampler_state->wrap_t, &y_offset0, &y_offset1, &y_subcoord[0], &y_subcoord[1]); @@ -562,9 +1132,10 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, if (dims >= 3) { lp_build_sample_wrap_linear_int(bld, bld->format_desc->block.height, - r_ipart, depth_vec, z_stride, - bld->static_state->pot_depth, - bld->static_state->wrap_r, + r_ipart, &r_fpart, r_float, + depth_vec, z_stride, + bld->static_texture_state->pot_depth, + bld->static_sampler_state->wrap_r, &z_offset0, &z_offset1, &z_subcoord[0], &z_subcoord[1]); for (y = 0; y < 2; y++) { @@ -576,198 +1147,182 @@ lp_build_sample_image_linear(struct lp_build_sample_context *bld, } } } - else if (bld->static_state->target == PIPE_TEXTURE_CUBE) { - LLVMValueRef z_offset; - z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec); - for (y = 0; y < 2; y++) { - for (x = 0; x < 2; x++) { - /* The r coord is the cube face in [0,5] */ - offset[0][y][x] = lp_build_add(&bld->int_coord_bld, - offset[0][y][x], z_offset); - } - } - } - /* - * Transform 4 x i32 in - * - * s_fpart = {s0, s1, s2, s3} - * - * into 8 x i16 - * - * s_fpart = {00, s0, 00, s1, 00, s2, 00, s3} - * - * into two 8 x i16 - * - * s_fpart_lo = {s0, s0, s0, s0, s1, s1, s1, s1} - * s_fpart_hi = {s2, s2, s2, s2, s3, s3, s3, s3} - * - * and likewise 
for t_fpart. There is no risk of loosing precision here - * since the fractional parts only use the lower 8bits. - */ - s_fpart = LLVMBuildBitCast(builder, s_fpart, h16_vec_type, ""); - if (dims >= 2) - t_fpart = LLVMBuildBitCast(builder, t_fpart, h16_vec_type, ""); - if (dims >= 3) - r_fpart = LLVMBuildBitCast(builder, r_fpart, h16_vec_type, ""); + lp_build_sample_fetch_image_linear(bld, data_ptr, offset, + x_subcoord, y_subcoord, + s_fpart, t_fpart, r_fpart, + colors_lo, colors_hi); +} - { - LLVMTypeRef elem_type = LLVMInt32Type(); - LLVMValueRef shuffles_lo[LP_MAX_VECTOR_LENGTH]; - LLVMValueRef shuffles_hi[LP_MAX_VECTOR_LENGTH]; - LLVMValueRef shuffle_lo; - LLVMValueRef shuffle_hi; - for (j = 0; j < h16.type.length; j += 4) { -#ifdef PIPE_ARCH_LITTLE_ENDIAN - unsigned subindex = 0; -#else - unsigned subindex = 1; -#endif - LLVMValueRef index; +/** + * Sample a single texture image with (bi-)(tri-)linear sampling. + * Return filtered color as two vectors of 16-bit fixed point values. + * Does address calcs (except offsets) with floats. + * Useful for AVX which has support for 8x32 floats but not 8x32 ints. + */ +static void +lp_build_sample_image_linear_afloat(struct lp_build_sample_context *bld, + LLVMValueRef int_size, + LLVMValueRef row_stride_vec, + LLVMValueRef img_stride_vec, + LLVMValueRef data_ptr, + LLVMValueRef mipoffsets, + LLVMValueRef s, + LLVMValueRef t, + LLVMValueRef r, + LLVMValueRef *colors_lo, + LLVMValueRef *colors_hi) +{ + const unsigned dims = bld->dims; + LLVMValueRef width_vec, height_vec, depth_vec; + LLVMValueRef s_fpart; + LLVMValueRef t_fpart = NULL; + LLVMValueRef r_fpart = NULL; + LLVMValueRef x_stride, y_stride, z_stride; + LLVMValueRef x_offset0, x_offset1; + LLVMValueRef y_offset0, y_offset1; + LLVMValueRef z_offset0, z_offset1; + LLVMValueRef offset[2][2][2]; /* [z][y][x] */ + LLVMValueRef x_subcoord[2], y_subcoord[2]; + LLVMValueRef flt_size; + LLVMValueRef x_icoord0, x_icoord1; + LLVMValueRef y_icoord0, y_icoord1; + LLVMValueRef z_icoord0, z_icoord1; + unsigned x, y, z; - index = LLVMConstInt(elem_type, j/2 + subindex, 0); - for (i = 0; i < 4; ++i) - shuffles_lo[j + i] = index; + flt_size = lp_build_int_to_float(&bld->float_size_bld, int_size); - index = LLVMConstInt(elem_type, h16.type.length/2 + j/2 + subindex, 0); - for (i = 0; i < 4; ++i) - shuffles_hi[j + i] = index; - } + lp_build_extract_image_sizes(bld, + &bld->float_size_bld, + bld->coord_type, + flt_size, + &width_vec, + &height_vec, + &depth_vec); - shuffle_lo = LLVMConstVector(shuffles_lo, h16.type.length); - shuffle_hi = LLVMConstVector(shuffles_hi, h16.type.length); + /* do texcoord wrapping and compute texel offsets */ + lp_build_sample_wrap_linear_float(bld, + bld->format_desc->block.width, + s, width_vec, + bld->static_texture_state->pot_width, + bld->static_sampler_state->wrap_s, + &x_icoord0, &x_icoord1, + &s_fpart, + bld->static_sampler_state->force_nearest_s); + + if (dims >= 2) { + lp_build_sample_wrap_linear_float(bld, + bld->format_desc->block.height, + t, height_vec, + bld->static_texture_state->pot_height, + bld->static_sampler_state->wrap_t, + &y_icoord0, &y_icoord1, + &t_fpart, + bld->static_sampler_state->force_nearest_t); - s_fpart_lo = LLVMBuildShuffleVector(builder, s_fpart, h16.undef, - shuffle_lo, ""); - s_fpart_hi = LLVMBuildShuffleVector(builder, s_fpart, h16.undef, - shuffle_hi, ""); - if (dims >= 2) { - t_fpart_lo = LLVMBuildShuffleVector(builder, t_fpart, h16.undef, - shuffle_lo, ""); - t_fpart_hi = LLVMBuildShuffleVector(builder, t_fpart, h16.undef, - shuffle_hi, 
""); - } if (dims >= 3) { - r_fpart_lo = LLVMBuildShuffleVector(builder, r_fpart, h16.undef, - shuffle_lo, ""); - r_fpart_hi = LLVMBuildShuffleVector(builder, r_fpart, h16.undef, - shuffle_hi, ""); + lp_build_sample_wrap_linear_float(bld, + bld->format_desc->block.height, + r, depth_vec, + bld->static_texture_state->pot_depth, + bld->static_sampler_state->wrap_r, + &z_icoord0, &z_icoord1, + &r_fpart, 0); } } /* - * Fetch the pixels as 4 x 32bit (rgba order might differ): - * - * rgba0 rgba1 rgba2 rgba3 - * - * bit cast them into 16 x u8 - * - * r0 g0 b0 a0 r1 g1 b1 a1 r2 g2 b2 a2 r3 g3 b3 a3 - * - * unpack them into two 8 x i16: - * - * r0 g0 b0 a0 r1 g1 b1 a1 - * r2 g2 b2 a2 r3 g3 b3 a3 - * - * The higher 8 bits of the resulting elements will be zero. + * From here on we deal with ints, and we should split up the 256bit + * vectors manually for better generated code. */ - numj = 1 + (dims >= 2); - numk = 1 + (dims >= 3); - for (k = 0; k < numk; k++) { - for (j = 0; j < numj; j++) { - for (i = 0; i < 2; i++) { - LLVMValueRef rgba8; - - if (util_format_is_rgba8_variant(bld->format_desc)) { - /* - * Given the format is a rgba8, just read the pixels as is, - * without any swizzling. Swizzling will be done later. - */ - rgba8 = lp_build_gather(bld->builder, - bld->texel_type.length, - bld->format_desc->block.bits, - bld->texel_type.width, - data_ptr, offset[k][j][i]); + /* get pixel, row and image strides */ + x_stride = lp_build_const_vec(bld->gallivm, + bld->int_coord_bld.type, + bld->format_desc->block.bits/8); + y_stride = row_stride_vec; + z_stride = img_stride_vec; - rgba8 = LLVMBuildBitCast(builder, rgba8, u8n_vec_type, ""); - } - else { - rgba8 = lp_build_fetch_rgba_aos(bld->builder, - bld->format_desc, - u8n.type, - data_ptr, offset[k][j][i], - x_subcoord[i], - y_subcoord[j]); - } + /* + * compute texel offset - + * cannot do offset calc with floats, difficult for block-based formats, + * and not enough precision anyway. + */ + lp_build_sample_partial_offset(&bld->int_coord_bld, + bld->format_desc->block.width, + x_icoord0, x_stride, + &x_offset0, &x_subcoord[0]); + lp_build_sample_partial_offset(&bld->int_coord_bld, + bld->format_desc->block.width, + x_icoord1, x_stride, + &x_offset1, &x_subcoord[1]); + + /* add potential cube/array/mip offsets now as they are constant per pixel */ + if (bld->static_texture_state->target == PIPE_TEXTURE_CUBE || + bld->static_texture_state->target == PIPE_TEXTURE_1D_ARRAY || + bld->static_texture_state->target == PIPE_TEXTURE_2D_ARRAY) { + LLVMValueRef z_offset; + z_offset = lp_build_mul(&bld->int_coord_bld, r, img_stride_vec); + /* The r coord is the cube face in [0,5] or array layer */ + x_offset0 = lp_build_add(&bld->int_coord_bld, x_offset0, z_offset); + x_offset1 = lp_build_add(&bld->int_coord_bld, x_offset1, z_offset); + } + if (mipoffsets) { + x_offset0 = lp_build_add(&bld->int_coord_bld, x_offset0, mipoffsets); + x_offset1 = lp_build_add(&bld->int_coord_bld, x_offset1, mipoffsets); + } - /* Expand one 4*rgba8 to two 2*rgba16 */ - lp_build_unpack2(builder, u8n.type, h16.type, - rgba8, - &neighbors_lo[k][j][i], &neighbors_hi[k][j][i]); - } + for (z = 0; z < 2; z++) { + for (y = 0; y < 2; y++) { + offset[z][y][0] = x_offset0; + offset[z][y][1] = x_offset1; } } - /* - * Linear interpolation with 8.8 fixed point. 
- */ - if (dims == 1) { - /* 1-D lerp */ - packed_lo = lp_build_lerp(&h16, - s_fpart_lo, - neighbors_lo[0][0][0], - neighbors_lo[0][0][1]); - - packed_hi = lp_build_lerp(&h16, - s_fpart_hi, - neighbors_hi[0][0][0], - neighbors_hi[0][0][1]); + if (dims >= 2) { + lp_build_sample_partial_offset(&bld->int_coord_bld, + bld->format_desc->block.height, + y_icoord0, y_stride, + &y_offset0, &y_subcoord[0]); + lp_build_sample_partial_offset(&bld->int_coord_bld, + bld->format_desc->block.height, + y_icoord1, y_stride, + &y_offset1, &y_subcoord[1]); + for (z = 0; z < 2; z++) { + for (x = 0; x < 2; x++) { + offset[z][0][x] = lp_build_add(&bld->int_coord_bld, + offset[z][0][x], y_offset0); + offset[z][1][x] = lp_build_add(&bld->int_coord_bld, + offset[z][1][x], y_offset1); + } + } } - else { - /* 2-D lerp */ - packed_lo = lp_build_lerp_2d(&h16, - s_fpart_lo, t_fpart_lo, - neighbors_lo[0][0][0], - neighbors_lo[0][0][1], - neighbors_lo[0][1][0], - neighbors_lo[0][1][1]); - - packed_hi = lp_build_lerp_2d(&h16, - s_fpart_hi, t_fpart_hi, - neighbors_hi[0][0][0], - neighbors_hi[0][0][1], - neighbors_hi[0][1][0], - neighbors_hi[0][1][1]); - if (dims >= 3) { - LLVMValueRef packed_lo2, packed_hi2; - - /* lerp in the second z slice */ - packed_lo2 = lp_build_lerp_2d(&h16, - s_fpart_lo, t_fpart_lo, - neighbors_lo[1][0][0], - neighbors_lo[1][0][1], - neighbors_lo[1][1][0], - neighbors_lo[1][1][1]); - - packed_hi2 = lp_build_lerp_2d(&h16, - s_fpart_hi, t_fpart_hi, - neighbors_hi[1][0][0], - neighbors_hi[1][0][1], - neighbors_hi[1][1][0], - neighbors_hi[1][1][1]); - /* interp between two z slices */ - packed_lo = lp_build_lerp(&h16, r_fpart_lo, - packed_lo, packed_lo2); - packed_hi = lp_build_lerp(&h16, r_fpart_hi, - packed_hi, packed_hi2); + if (dims >= 3) { + LLVMValueRef z_subcoord[2]; + lp_build_sample_partial_offset(&bld->int_coord_bld, + 1, + z_icoord0, z_stride, + &z_offset0, &z_subcoord[0]); + lp_build_sample_partial_offset(&bld->int_coord_bld, + 1, + z_icoord1, z_stride, + &z_offset1, &z_subcoord[1]); + for (y = 0; y < 2; y++) { + for (x = 0; x < 2; x++) { + offset[0][y][x] = lp_build_add(&bld->int_coord_bld, + offset[0][y][x], z_offset0); + offset[1][y][x] = lp_build_add(&bld->int_coord_bld, + offset[1][y][x], z_offset1); + } } } - *colors_lo = packed_lo; - *colors_hi = packed_hi; + lp_build_sample_fetch_image_linear(bld, data_ptr, offset, + x_subcoord, y_subcoord, + s_fpart, t_fpart, r_fpart, + colors_lo, colors_hi); } @@ -790,38 +1345,66 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld, LLVMValueRef colors_lo_var, LLVMValueRef colors_hi_var) { - LLVMBuilderRef builder = bld->builder; + LLVMBuilderRef builder = bld->gallivm->builder; LLVMValueRef size0; LLVMValueRef size1; - LLVMValueRef row_stride0_vec; - LLVMValueRef row_stride1_vec; - LLVMValueRef img_stride0_vec; - LLVMValueRef img_stride1_vec; + LLVMValueRef row_stride0_vec = NULL; + LLVMValueRef row_stride1_vec = NULL; + LLVMValueRef img_stride0_vec = NULL; + LLVMValueRef img_stride1_vec = NULL; LLVMValueRef data_ptr0; LLVMValueRef data_ptr1; + LLVMValueRef mipoff0 = NULL; + LLVMValueRef mipoff1 = NULL; LLVMValueRef colors0_lo, colors0_hi; LLVMValueRef colors1_lo, colors1_hi; - /* sample the first mipmap level */ lp_build_mipmap_level_sizes(bld, ilevel0, &size0, &row_stride0_vec, &img_stride0_vec); - data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0); - if (img_filter == PIPE_TEX_FILTER_NEAREST) { - lp_build_sample_image_nearest(bld, - size0, - row_stride0_vec, img_stride0_vec, - data_ptr0, s, t, r, - &colors0_lo, &colors0_hi); + if 
(bld->num_lods == 1) { + data_ptr0 = lp_build_get_mipmap_level(bld, ilevel0); } else { - assert(img_filter == PIPE_TEX_FILTER_LINEAR); - lp_build_sample_image_linear(bld, - size0, - row_stride0_vec, img_stride0_vec, - data_ptr0, s, t, r, - &colors0_lo, &colors0_hi); + /* This path should work for num_lods 1 too but slightly less efficient */ + data_ptr0 = bld->base_ptr; + mipoff0 = lp_build_get_mip_offsets(bld, ilevel0); + } + + if (util_cpu_caps.has_avx && bld->coord_type.length > 4) { + if (img_filter == PIPE_TEX_FILTER_NEAREST) { + lp_build_sample_image_nearest_afloat(bld, + size0, + row_stride0_vec, img_stride0_vec, + data_ptr0, mipoff0, s, t, r, + &colors0_lo, &colors0_hi); + } + else { + assert(img_filter == PIPE_TEX_FILTER_LINEAR); + lp_build_sample_image_linear_afloat(bld, + size0, + row_stride0_vec, img_stride0_vec, + data_ptr0, mipoff0, s, t, r, + &colors0_lo, &colors0_hi); + } + } + else { + if (img_filter == PIPE_TEX_FILTER_NEAREST) { + lp_build_sample_image_nearest(bld, + size0, + row_stride0_vec, img_stride0_vec, + data_ptr0, mipoff0, s, t, r, + &colors0_lo, &colors0_hi); + } + else { + assert(img_filter == PIPE_TEX_FILTER_LINEAR); + lp_build_sample_image_linear(bld, + size0, + row_stride0_vec, img_stride0_vec, + data_ptr0, mipoff0, s, t, r, + &colors0_lo, &colors0_hi); + } } /* Store the first level's colors in the output variables */ @@ -829,74 +1412,147 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld, LLVMBuildStore(builder, colors0_hi, colors_hi_var); if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) { - LLVMValueRef h16_scale = LLVMConstReal(LLVMFloatType(), 256.0); - LLVMTypeRef i32_type = LLVMIntType(32); + LLVMValueRef h16vec_scale = lp_build_const_vec(bld->gallivm, + bld->perquadf_bld.type, 256.0); + LLVMTypeRef i32vec_type = lp_build_vec_type(bld->gallivm, bld->perquadi_bld.type); struct lp_build_if_state if_ctx; LLVMValueRef need_lerp; + unsigned num_quads = bld->coord_bld.type.length / 4; + unsigned i; - lod_fpart = LLVMBuildFMul(builder, lod_fpart, h16_scale, ""); - lod_fpart = LLVMBuildFPToSI(builder, lod_fpart, i32_type, "lod_fpart.fixed16"); + lod_fpart = LLVMBuildFMul(builder, lod_fpart, h16vec_scale, ""); + lod_fpart = LLVMBuildFPToSI(builder, lod_fpart, i32vec_type, "lod_fpart.fixed16"); /* need_lerp = lod_fpart > 0 */ - need_lerp = LLVMBuildICmp(builder, LLVMIntSGT, - lod_fpart, LLVMConstNull(i32_type), - "need_lerp"); + if (num_quads == 1) { + need_lerp = LLVMBuildICmp(builder, LLVMIntSGT, + lod_fpart, bld->perquadi_bld.zero, + "need_lerp"); + } + else { + /* + * We'll do mip filtering if any of the quads need it. + * It might be better to split the vectors here and only fetch/filter + * quads which need it. + */ + /* + * We need to clamp lod_fpart here since we can get negative + * values which would screw up filtering if not all + * lod_fpart values have same sign. + * We can however then skip the greater than comparison. 
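+ * I.e. below we take lod_fpart = max(lod_fpart, 0) and then just
+ * test whether any element is non-zero.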
+ */ + lod_fpart = lp_build_max(&bld->perquadi_bld, lod_fpart, + bld->perquadi_bld.zero); + need_lerp = lp_build_any_true_range(&bld->perquadi_bld, num_quads, lod_fpart); + } - lp_build_if(&if_ctx, builder, need_lerp); + lp_build_if(&if_ctx, bld->gallivm, need_lerp); { struct lp_build_context h16_bld; - lp_build_context_init(&h16_bld, builder, lp_type_ufixed(16)); + lp_build_context_init(&h16_bld, bld->gallivm, lp_type_ufixed(16, bld->vector_width)); /* sample the second mipmap level */ lp_build_mipmap_level_sizes(bld, ilevel1, &size1, &row_stride1_vec, &img_stride1_vec); - data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1); - if (img_filter == PIPE_TEX_FILTER_NEAREST) { - lp_build_sample_image_nearest(bld, - size1, - row_stride1_vec, img_stride1_vec, - data_ptr1, s, t, r, - &colors1_lo, &colors1_hi); + lp_build_mipmap_level_sizes(bld, ilevel1, + &size1, + &row_stride1_vec, &img_stride1_vec); + if (bld->num_lods == 1) { + data_ptr1 = lp_build_get_mipmap_level(bld, ilevel1); } else { - lp_build_sample_image_linear(bld, - size1, - row_stride1_vec, img_stride1_vec, - data_ptr1, s, t, r, - &colors1_lo, &colors1_hi); + data_ptr1 = bld->base_ptr; + mipoff1 = lp_build_get_mip_offsets(bld, ilevel1); + } + + if (util_cpu_caps.has_avx && bld->coord_type.length > 4) { + if (img_filter == PIPE_TEX_FILTER_NEAREST) { + lp_build_sample_image_nearest_afloat(bld, + size1, + row_stride1_vec, img_stride1_vec, + data_ptr1, mipoff1, s, t, r, + &colors1_lo, &colors1_hi); + } + else { + lp_build_sample_image_linear_afloat(bld, + size1, + row_stride1_vec, img_stride1_vec, + data_ptr1, mipoff1, s, t, r, + &colors1_lo, &colors1_hi); + } + } + else { + if (img_filter == PIPE_TEX_FILTER_NEAREST) { + lp_build_sample_image_nearest(bld, + size1, + row_stride1_vec, img_stride1_vec, + data_ptr1, mipoff1, s, t, r, + &colors1_lo, &colors1_hi); + } + else { + lp_build_sample_image_linear(bld, + size1, + row_stride1_vec, img_stride1_vec, + data_ptr1, mipoff1, s, t, r, + &colors1_lo, &colors1_hi); + } } /* interpolate samples from the two mipmap levels */ - lod_fpart = LLVMBuildTrunc(builder, lod_fpart, h16_bld.elem_type, ""); - lod_fpart = lp_build_broadcast_scalar(&h16_bld, lod_fpart); + if (num_quads == 1) { + lod_fpart = LLVMBuildTrunc(builder, lod_fpart, h16_bld.elem_type, ""); + lod_fpart = lp_build_broadcast_scalar(&h16_bld, lod_fpart); #if HAVE_LLVM == 0x208 - /* This is a work-around for a bug in LLVM 2.8. - * Evidently, something goes wrong in the construction of the - * lod_fpart short[8] vector. Adding this no-effect shuffle seems - * to force the vector to be properly constructed. - * Tested with mesa-demos/src/tests/mipmap_limits.c (press t, f). - */ - { - LLVMValueRef shuffles[8], shuffle; - int i; - assert(h16_bld.type.length <= Elements(shuffles)); - for (i = 0; i < h16_bld.type.length; i++) - shuffles[i] = lp_build_const_int32(2 * (i & 1)); - shuffle = LLVMConstVector(shuffles, h16_bld.type.length); - lod_fpart = LLVMBuildShuffleVector(builder, - lod_fpart, lod_fpart, - shuffle, ""); - } + /* This is a work-around for a bug in LLVM 2.8. + * Evidently, something goes wrong in the construction of the + * lod_fpart short[8] vector. Adding this no-effect shuffle seems + * to force the vector to be properly constructed. + * Tested with mesa-demos/src/tests/mipmap_limits.c (press t, f). 
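+ * (The shuffle is semantically a no-op here, since lod_fpart was
+ * just broadcast from a scalar.)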
+ */ + { + LLVMValueRef shuffles[8], shuffle; + assert(h16_bld.type.length <= Elements(shuffles)); + for (i = 0; i < h16_bld.type.length; i++) + shuffles[i] = lp_build_const_int32(bld->gallivm, 2 * (i & 1)); + shuffle = LLVMConstVector(shuffles, h16_bld.type.length); + lod_fpart = LLVMBuildShuffleVector(builder, + lod_fpart, lod_fpart, + shuffle, ""); + } #endif - colors0_lo = lp_build_lerp(&h16_bld, lod_fpart, - colors0_lo, colors1_lo); - colors0_hi = lp_build_lerp(&h16_bld, lod_fpart, - colors0_hi, colors1_hi); + colors0_lo = lp_build_lerp(&h16_bld, lod_fpart, + colors0_lo, colors1_lo); + colors0_hi = lp_build_lerp(&h16_bld, lod_fpart, + colors0_hi, colors1_hi); + } + else { + LLVMValueRef lod_parts[LP_MAX_VECTOR_LENGTH/16]; + struct lp_type perquadi16_type = bld->perquadi_bld.type; + perquadi16_type.width /= 2; + perquadi16_type.length *= 2; + lod_fpart = LLVMBuildBitCast(builder, lod_fpart, + lp_build_vec_type(bld->gallivm, + perquadi16_type), ""); + /* XXX this only works for exactly 2 quads. More quads need shuffle */ + assert(num_quads == 2); + for (i = 0; i < num_quads; i++) { + LLVMValueRef indexi2 = lp_build_const_int32(bld->gallivm, i*2); + lod_parts[i] = lp_build_extract_broadcast(bld->gallivm, + perquadi16_type, + h16_bld.type, + lod_fpart, + indexi2); + } + colors0_lo = lp_build_lerp(&h16_bld, lod_parts[0], + colors0_lo, colors1_lo); + colors0_hi = lp_build_lerp(&h16_bld, lod_parts[1], + colors0_hi, colors1_hi); + } LLVMBuildStore(builder, colors0_lo, colors_lo_var); LLVMBuildStore(builder, colors0_hi, colors_hi_var); @@ -914,123 +1570,46 @@ lp_build_sample_mipmap(struct lp_build_sample_context *bld, */ void lp_build_sample_aos(struct lp_build_sample_context *bld, - unsigned unit, + unsigned sampler_unit, LLVMValueRef s, LLVMValueRef t, LLVMValueRef r, - const LLVMValueRef *ddx, - const LLVMValueRef *ddy, - LLVMValueRef lod_bias, /* optional */ - LLVMValueRef explicit_lod, /* optional */ + LLVMValueRef lod_ipart, + LLVMValueRef lod_fpart, + LLVMValueRef ilevel0, + LLVMValueRef ilevel1, LLVMValueRef texel_out[4]) { struct lp_build_context *int_bld = &bld->int_bld; - LLVMBuilderRef builder = bld->builder; - const unsigned mip_filter = bld->static_state->min_mip_filter; - const unsigned min_filter = bld->static_state->min_img_filter; - const unsigned mag_filter = bld->static_state->mag_img_filter; + LLVMBuilderRef builder = bld->gallivm->builder; + const unsigned mip_filter = bld->static_sampler_state->min_mip_filter; + const unsigned min_filter = bld->static_sampler_state->min_img_filter; + const unsigned mag_filter = bld->static_sampler_state->mag_img_filter; const unsigned dims = bld->dims; - LLVMValueRef lod_ipart = NULL, lod_fpart = NULL; - LLVMValueRef ilevel0, ilevel1 = NULL; LLVMValueRef packed, packed_lo, packed_hi; LLVMValueRef unswizzled[4]; - LLVMValueRef face_ddx[4], face_ddy[4]; struct lp_build_context h16_bld; - LLVMTypeRef i32t = LLVMInt32Type(); - LLVMValueRef i32t_zero = LLVMConstInt(i32t, 0, 0); /* we only support the common/simple wrap modes at this time */ - assert(lp_is_simple_wrap_mode(bld->static_state->wrap_s)); + assert(lp_is_simple_wrap_mode(bld->static_sampler_state->wrap_s)); if (dims >= 2) - assert(lp_is_simple_wrap_mode(bld->static_state->wrap_t)); + assert(lp_is_simple_wrap_mode(bld->static_sampler_state->wrap_t)); if (dims >= 3) - assert(lp_is_simple_wrap_mode(bld->static_state->wrap_r)); + assert(lp_is_simple_wrap_mode(bld->static_sampler_state->wrap_r)); /* make 16-bit fixed-pt builder context */ - lp_build_context_init(&h16_bld, builder, 
lp_type_ufixed(16)); - - /* cube face selection, compute pre-face coords, etc. */ - if (bld->static_state->target == PIPE_TEXTURE_CUBE) { - LLVMValueRef face, face_s, face_t; - lp_build_cube_lookup(bld, s, t, r, &face, &face_s, &face_t); - s = face_s; /* vec */ - t = face_t; /* vec */ - /* use 'r' to indicate cube face */ - r = lp_build_broadcast_scalar(&bld->int_coord_bld, face); /* vec */ - - /* recompute ddx, ddy using the new (s,t) face texcoords */ - face_ddx[0] = lp_build_scalar_ddx(&bld->coord_bld, s); - face_ddx[1] = lp_build_scalar_ddx(&bld->coord_bld, t); - face_ddx[2] = NULL; - face_ddx[3] = NULL; - face_ddy[0] = lp_build_scalar_ddy(&bld->coord_bld, s); - face_ddy[1] = lp_build_scalar_ddy(&bld->coord_bld, t); - face_ddy[2] = NULL; - face_ddy[3] = NULL; - ddx = face_ddx; - ddy = face_ddy; - } - - /* - * Compute the level of detail (float). - */ - if (min_filter != mag_filter || - mip_filter != PIPE_TEX_MIPFILTER_NONE) { - /* Need to compute lod either to choose mipmap levels or to - * distinguish between minification/magnification with one mipmap level. - */ - lp_build_lod_selector(bld, unit, ddx, ddy, - lod_bias, explicit_lod, - mip_filter, - &lod_ipart, &lod_fpart); - } else { - lod_ipart = i32t_zero; - } - - /* - * Compute integer mipmap level(s) to fetch texels from: ilevel0, ilevel1 - */ - switch (mip_filter) { - default: - assert(0 && "bad mip_filter value in lp_build_sample_aos()"); - /* fall-through */ - case PIPE_TEX_MIPFILTER_NONE: - /* always use mip level 0 */ - if (bld->static_state->target == PIPE_TEXTURE_CUBE) { - /* XXX this is a work-around for an apparent bug in LLVM 2.7. - * We should be able to set ilevel0 = const(0) but that causes - * bad x86 code to be emitted. - */ - assert(lod_ipart); - lp_build_nearest_mip_level(bld, unit, lod_ipart, &ilevel0); - } - else { - ilevel0 = i32t_zero; - } - break; - case PIPE_TEX_MIPFILTER_NEAREST: - assert(lod_ipart); - lp_build_nearest_mip_level(bld, unit, lod_ipart, &ilevel0); - break; - case PIPE_TEX_MIPFILTER_LINEAR: - assert(lod_ipart); - assert(lod_fpart); - lp_build_linear_mip_levels(bld, unit, - lod_ipart, &lod_fpart, - &ilevel0, &ilevel1); - break; - } + lp_build_context_init(&h16_bld, bld->gallivm, lp_type_ufixed(16, bld->vector_width)); /* * Get/interpolate texture colors. */ - packed_lo = lp_build_alloca(builder, h16_bld.vec_type, "packed_lo"); - packed_hi = lp_build_alloca(builder, h16_bld.vec_type, "packed_hi"); + packed_lo = lp_build_alloca(bld->gallivm, h16_bld.vec_type, "packed_lo"); + packed_hi = lp_build_alloca(bld->gallivm, h16_bld.vec_type, "packed_hi"); if (min_filter == mag_filter) { - /* no need to distinquish between minification and magnification */ + /* no need to distinguish between minification and magnification */ lp_build_sample_mipmap(bld, min_filter, mip_filter, s, t, r, @@ -1044,11 +1623,24 @@ lp_build_sample_aos(struct lp_build_sample_context *bld, struct lp_build_if_state if_ctx; LLVMValueRef minify; + /* + * XXX this should to all lods into account, if some are min + * some max probably could hack up the coords/weights in the linear + * path with selects to work for nearest. + * If that's just two quads sitting next to each other it seems + * quite ok to do the same filtering method on both though, at + * least unless we have explicit lod (and who uses different + * min/mag filter with that?) 
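+ * For now, when num_lods > 1 we simply take the first quad's
+ * lod_ipart for the minify decision below.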
+ */ + if (bld->num_lods > 1) + lod_ipart = LLVMBuildExtractElement(builder, lod_ipart, + lp_build_const_int32(bld->gallivm, 0), ""); + /* minify = lod >= 0.0 */ minify = LLVMBuildICmp(builder, LLVMIntSGE, lod_ipart, int_bld->zero, ""); - lp_build_if(&if_ctx, builder, minify); + lp_build_if(&if_ctx, bld->gallivm, minify); { /* Use the minification filter */ lp_build_sample_mipmap(bld, @@ -1063,7 +1655,7 @@ lp_build_sample_aos(struct lp_build_sample_context *bld, lp_build_sample_mipmap(bld, mag_filter, PIPE_TEX_MIPFILTER_NONE, s, t, r, - i32t_zero, NULL, NULL, + ilevel0, NULL, NULL, packed_lo, packed_hi); } lp_build_endif(&if_ctx); @@ -1073,15 +1665,15 @@ lp_build_sample_aos(struct lp_build_sample_context *bld, * combine the values stored in 'packed_lo' and 'packed_hi' variables * into 'packed' */ - packed = lp_build_pack2(builder, - h16_bld.type, lp_type_unorm(8), + packed = lp_build_pack2(bld->gallivm, + h16_bld.type, lp_type_unorm(8, bld->vector_width), LLVMBuildLoad(builder, packed_lo, ""), LLVMBuildLoad(builder, packed_hi, "")); /* * Convert to SoA and swizzle. */ - lp_build_rgba8_to_f32_soa(builder, + lp_build_rgba8_to_fi32_soa(bld->gallivm, bld->texel_type, packed, unswizzled); @@ -1096,6 +1688,4 @@ lp_build_sample_aos(struct lp_build_sample_context *bld, texel_out[2] = unswizzled[2]; texel_out[3] = unswizzled[3]; } - - apply_sampler_swizzle(bld, texel_out); }
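
Illustrative note (not part of the patch): a rough scalar model of the
8.8 fixed-point AoS filtering scheme the code above builds as LLVM IR
on 8 x i16 vectors. All names and helpers here are hypothetical, for
exposition only, and assume weights in [0, 256] as produced by the
wrap/weight code above.

    /* Rough scalar model (hypothetical helper names) of the 8.8
     * fixed-point bilinear blend performed on unpacked rgba8 texels. */
    #include <stdint.h>

    /* Blend two 8-bit channels with a fixed-point weight in [0, 256].
     * The math is done in 32 bits so the multiplies cannot overflow,
     * just as the vector code unpacks u8 texels into i16 lanes. */
    static inline uint8_t
    lerp_8_8(uint8_t a, uint8_t b, uint32_t w)
    {
       return (uint8_t)((a * (256 - w) + b * w) >> 8);
    }

    /* Bilinear filter of four packed rgba8 texels:
     * t00/t01 are the upper pair, t10/t11 the lower pair. */
    static uint32_t
    bilerp_rgba8(uint32_t t00, uint32_t t01, uint32_t t10, uint32_t t11,
                 uint32_t s_fpart, uint32_t t_fpart)
    {
       uint32_t result = 0;
       unsigned c;
       for (c = 0; c < 4; c++) {
          uint8_t a = (uint8_t)(t00 >> (8 * c));
          uint8_t b = (uint8_t)(t01 >> (8 * c));
          uint8_t d = (uint8_t)(t10 >> (8 * c));
          uint8_t e = (uint8_t)(t11 >> (8 * c));
          uint8_t top = lerp_8_8(a, b, s_fpart);   /* lerp along s */
          uint8_t bot = lerp_8_8(d, e, s_fpart);
          result |= (uint32_t)lerp_8_8(top, bot, t_fpart) << (8 * c);
       }
       return result;
    }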