*/
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_pointer.h"
#include "lp_bld_format.h"
#include "lp_bld_pack.h"
#include "lp_bld_intr.h"
-
+#include "lp_bld_logic.h"
+#include "lp_bld_bitarit.h"
+#include "lp_bld_misc.h"
/**
* Basic swizzling. Rearrange the order of the unswizzled array elements
return TRUE;
}
+/*
+ * Do rounding when converting small unorm values to larger ones.
+ * Not quite 100% accurate, as it's done by appending MSBs, but
+ * should be good enough.
+ */
+
+static inline LLVMValueRef
+scale_bits_up(struct gallivm_state *gallivm,
+              int src_bits,
+              int dst_bits,
+              LLVMValueRef src,
+              struct lp_type src_type)
+{
+   LLVMBuilderRef builder = gallivm->builder;
+   LLVMValueRef result = src;
+
+   if (src_bits == 1 && dst_bits > 1) {
+      /*
+       * Useful for a1 - we'd need quite some repeated copies otherwise.
+       * The single bit selects between 0 and the all-ones dst-sized mask:
+       * cmp yields ~0 where src == 0, so andnot keeps the mask only where
+       * the source bit was set.
+       */
+      struct lp_build_context bld;
+      LLVMValueRef dst_mask;
+      lp_build_context_init(&bld, gallivm, src_type);
+      dst_mask = lp_build_const_int_vec(gallivm, src_type,
+                                        (1 << dst_bits) - 1);
+      result = lp_build_cmp(&bld, PIPE_FUNC_EQUAL, src,
+                            lp_build_const_int_vec(gallivm, src_type, 0));
+      result = lp_build_andnot(&bld, dst_mask, result);
+   }
+   else if (dst_bits > src_bits) {
+      /* Scale up bits */
+      int db = dst_bits - src_bits;
+
+      /* Shift left by difference in bits */
+      result = LLVMBuildShl(builder,
+                            src,
+                            lp_build_const_int_vec(gallivm, src_type, db),
+                            "");
+
+      if (db <= src_bits) {
+         /* Enough bits in src to fill the remainder */
+         LLVMValueRef lower = LLVMBuildLShr(builder,
+                                            src,
+                                            lp_build_const_int_vec(gallivm, src_type,
+                                                                   src_bits - db),
+                                            "");
+
+         result = LLVMBuildOr(builder, result, lower, "");
+      } else if (db > src_bits) {
+         /*
+          * Need to repeatedly copy src bits to fill remainder in dst:
+          * each iteration doubles the number of valid high bits by
+          * OR-ing in a right-shifted copy of what we have so far.
+          */
+         unsigned n;
+
+         for (n = src_bits; n < dst_bits; n *= 2) {
+            LLVMValueRef shuv = lp_build_const_int_vec(gallivm, src_type, n);
+
+            result = LLVMBuildOr(builder,
+                                 result,
+                                 LLVMBuildLShr(builder, result, shuv, ""),
+                                 "");
+         }
+      }
+   } else {
+      assert (dst_bits == src_bits);
+   }
+
+   return result;
+}
/**
* Unpack a single pixel into its XYZW components.
* \param ptr address of the pixel block (or the texel if uncompressed)
* \param i, j the sub-block pixel coordinates. For non-compressed formats
* these will always be (0, 0).
+ * \param cache optional value pointing to a lp_build_format_cache structure
* \return a 4 element vector with the pixel's RGBA values.
*/
LLVMValueRef
LLVMValueRef j,
LLVMValueRef cache)
{
+ const struct util_format_unpack_description *unpack =
+ util_format_unpack_description(format_desc->format);
LLVMBuilderRef builder = gallivm->builder;
unsigned num_pixels = type.length / 4;
struct lp_build_context bld;
if (format_matches_type(format_desc, type) &&
format_desc->block.bits <= type.width * 4 &&
/* XXX this shouldn't be needed */
- util_is_power_of_two(format_desc->block.bits)) {
+ util_is_power_of_two_or_zero(format_desc->block.bits)) {
LLVMValueRef packed;
LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, type);
struct lp_type fetch_type;
return lp_build_format_swizzle_aos(format_desc, &bld, packed);
}
+ /*
+ * Bit arithmetic for converting small_unorm to unorm8.
+ *
+ * This misses some opportunities for optimizations (like skipping mask
+ * for the highest channel for instance, or doing bit scaling in parallel
+ * for channels with the same bit width) but it should be passable for
+ * all arithmetic formats.
+ */
+ if (format_desc->layout == UTIL_FORMAT_LAYOUT_PLAIN &&
+ format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB &&
+ util_format_fits_8unorm(format_desc) &&
+ type.width == 8 && type.norm == 1 && type.sign == 0 &&
+ type.fixed == 0 && type.floating == 0) {
+ LLVMValueRef packed, res = NULL, chans[4], rgba[4];
+ LLVMTypeRef dst_vec_type, conv_vec_type;
+ struct lp_type fetch_type, conv_type;
+ struct lp_build_context bld_conv;
+ unsigned j;
+
+ fetch_type = lp_type_uint(type.width*4);
+ conv_type = lp_type_int_vec(type.width*4, type.width * type.length);
+ dst_vec_type = lp_build_vec_type(gallivm, type);
+ conv_vec_type = lp_build_vec_type(gallivm, conv_type);
+ lp_build_context_init(&bld_conv, gallivm, conv_type);
+
+ packed = lp_build_gather(gallivm, type.length/4,
+ format_desc->block.bits, fetch_type,
+ aligned, base_ptr, offset, TRUE);
+
+ assert(format_desc->block.bits * type.length / 4 <=
+ type.width * type.length);
+
+ packed = LLVMBuildBitCast(gallivm->builder, packed, conv_vec_type, "");
+
+ for (j = 0; j < format_desc->nr_channels; ++j) {
+ unsigned mask = 0;
+ unsigned sa = format_desc->channel[j].shift;
+
+ mask = (1 << format_desc->channel[j].size) - 1;
+
+ /* Extract bits from source */
+ chans[j] = LLVMBuildLShr(builder, packed,
+ lp_build_const_int_vec(gallivm, conv_type, sa),
+ "");
+
+ chans[j] = LLVMBuildAnd(builder, chans[j],
+ lp_build_const_int_vec(gallivm, conv_type, mask),
+ "");
+
+ /* Scale bits */
+ if (type.norm) {
+ chans[j] = scale_bits_up(gallivm, format_desc->channel[j].size,
+ type.width, chans[j], conv_type);
+ }
+ }
+ /*
+ * This is a hacked lp_build_format_swizzle_soa() since we need a
+ * normalized 1 but only 8 bits in a 32bit vector...
+ */
+ for (j = 0; j < 4; ++j) {
+ enum pipe_swizzle swizzle = format_desc->swizzle[j];
+ if (swizzle == PIPE_SWIZZLE_1) {
+ rgba[j] = lp_build_const_int_vec(gallivm, conv_type, (1 << type.width) - 1);
+ } else {
+ rgba[j] = lp_build_swizzle_soa_channel(&bld_conv, chans, swizzle);
+ }
+ if (j == 0) {
+ res = rgba[j];
+ } else {
+ rgba[j] = LLVMBuildShl(builder, rgba[j],
+ lp_build_const_int_vec(gallivm, conv_type,
+ j * type.width), "");
+ res = LLVMBuildOr(builder, res, rgba[j], "");
+ }
+ }
+ res = LLVMBuildBitCast(gallivm->builder, res, dst_vec_type, "");
+
+ return res;
+ }
+
/*
* Bit arithmetic
*/
format_desc->block.width == 1 &&
format_desc->block.height == 1 &&
/* XXX this shouldn't be needed */
- util_is_power_of_two(format_desc->block.bits) &&
+ util_is_power_of_two_or_zero(format_desc->block.bits) &&
format_desc->block.bits <= 32 &&
format_desc->is_bitmask &&
!format_desc->is_mixed &&
unsigned k, num_conv_src, num_conv_dst;
/*
- * XXX: We end up here for the AoS unorm8 sampling (if the format wasn't some
- * 888(8) variant), so things like rgb565. This is _really_ suboptimal.
- * Not only do we a single pixel at a time but we convert to float,
- * do a normalize mul, un-normalize mul, convert back to int, finally pack
- * down to 8 bits. At the end throw in a couple of shifts/ands/ors for aos
- * swizzle (well rgb565 is ok but bgrx5551 not for instance) for good
- * measure. (And if we're not extra careful we get some pointless min/max
- * too for clamping values to range). This is a disaster of epic proportions,
- * simply forcing SoA sampling would be way faster (even when we don't have
- * AVX support).
- * We should make sure we cannot hit this code path for anything but single
- * pixels.
+ * Note this path is generally terrible for fetching multiple pixels.
+ * We should make sure we cannot hit this code path for anything but
+ * single pixels.
*/
/*
* s3tc rgb formats
*/
- if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC && cache) {
+ if (format_desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {
+ struct lp_type tmp_type;
+ LLVMValueRef tmp;
+
+ memset(&tmp_type, 0, sizeof tmp_type);
+ tmp_type.width = 8;
+ tmp_type.length = num_pixels * 4;
+ tmp_type.norm = TRUE;
+
+ tmp = lp_build_fetch_s3tc_rgba_aos(gallivm,
+ format_desc,
+ num_pixels,
+ base_ptr,
+ offset,
+ i, j,
+ cache);
+
+ lp_build_conv(gallivm,
+ tmp_type, type,
+ &tmp, 1, &tmp, 1);
+
+ return tmp;
+ }
+
+ /*
+ * rgtc rgb formats
+ */
+
+ if (format_desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
struct lp_type tmp_type;
LLVMValueRef tmp;
tmp_type.width = 8;
tmp_type.length = num_pixels * 4;
tmp_type.norm = TRUE;
+ tmp_type.sign = (format_desc->format == PIPE_FORMAT_RGTC1_SNORM ||
+ format_desc->format == PIPE_FORMAT_RGTC2_SNORM ||
+ format_desc->format == PIPE_FORMAT_LATC1_SNORM ||
+ format_desc->format == PIPE_FORMAT_LATC2_SNORM);
- tmp = lp_build_fetch_cached_texels(gallivm,
+ tmp = lp_build_fetch_rgtc_rgba_aos(gallivm,
format_desc,
num_pixels,
base_ptr,
* Fallback to util_format_description::fetch_rgba_8unorm().
*/
- if (format_desc->fetch_rgba_8unorm &&
+ if (unpack->fetch_rgba_8unorm &&
!type.floating && type.width == 8 && !type.sign && type.norm) {
/*
* Fallback to calling util_format_description::fetch_rgba_8unorm.
function_type = LLVMFunctionType(ret_type, arg_types,
ARRAY_SIZE(arg_types), 0);
+ if (gallivm->cache)
+ gallivm->cache->dont_cache = true;
/* make const pointer for the C fetch_rgba_8unorm function */
function = lp_build_const_int_pointer(gallivm,
- func_to_pointer((func_pointer) format_desc->fetch_rgba_8unorm));
+ func_to_pointer((func_pointer) unpack->fetch_rgba_8unorm));
/* cast the callee pointer to the function's type */
function = LLVMBuildBitCast(builder, function,
}
/*
- * Fallback to util_format_description::fetch_rgba_float().
+ * Fallback to fetch_rgba().
*/
- if (format_desc->fetch_rgba_float) {
+ util_format_fetch_rgba_func_ptr fetch_rgba =
+ util_format_fetch_rgba_func(format_desc->format);
+ if (fetch_rgba) {
/*
* Fallback to calling util_format_description::fetch_rgba_float.
*
}
/*
- * Declare and bind format_desc->fetch_rgba_float().
+ * Declare and bind fetch_rgba().
*/
{
arg_types[2] = i32t;
arg_types[3] = i32t;
+ if (gallivm->cache)
+ gallivm->cache->dont_cache = true;
function = lp_build_const_func_pointer(gallivm,
- func_to_pointer((func_pointer) format_desc->fetch_rgba_float),
+ func_to_pointer((func_pointer) fetch_rgba),
ret_type,
arg_types, ARRAY_SIZE(arg_types),
format_desc->short_name);