diff --git a/src/gallium/auxiliary/gallivm/lp_bld_conv.c b/src/gallium/auxiliary/gallivm/lp_bld_conv.c
index cc442369630..712ce5f92dc 100644
--- a/src/gallium/auxiliary/gallivm/lp_bld_conv.c
+++ b/src/gallium/auxiliary/gallivm/lp_bld_conv.c
@@ -63,143 +63,145 @@
 #include "util/u_debug.h"
 #include "util/u_math.h"
+#include "util/u_half.h"
 #include "util/u_cpu_detect.h"
 
 #include "lp_bld_type.h"
 #include "lp_bld_const.h"
 #include "lp_bld_arit.h"
+#include "lp_bld_bitarit.h"
 #include "lp_bld_pack.h"
 #include "lp_bld_conv.h"
 #include "lp_bld_logic.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_printf.h"
+#include "lp_bld_format.h"
+
 
 /**
  * Converts int16 half-float to float32
- * Note this can be performed in 1 instruction if vcvtph2ps exists (sse5 i think?)
+ * Note this can be performed in 1 instruction if vcvtph2ps exists (f16c/cvt16)
  * [llvm.x86.vcvtph2ps / _mm_cvtph_ps]
  *
  * @param src           value to convert
  *
- * ref http://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
- * ref https://gist.github.com/2144712
  */
 LLVMValueRef
 lp_build_half_to_float(struct gallivm_state *gallivm,
                        LLVMValueRef src)
 {
-   int src_length = LLVMGetVectorSize(LLVMTypeOf(src));
+   LLVMBuilderRef builder = gallivm->builder;
+   LLVMTypeRef src_type = LLVMTypeOf(src);
+   unsigned src_length = LLVMGetTypeKind(src_type) == LLVMVectorTypeKind ?
+                            LLVMGetVectorSize(src_type) : 1;
 
    struct lp_type f32_type = lp_type_float_vec(32, 32 * src_length);
    struct lp_type i32_type = lp_type_int_vec(32, 32 * src_length);
-
-   LLVMBuilderRef builder = gallivm->builder;
    LLVMTypeRef int_vec_type = lp_build_vec_type(gallivm, i32_type);
-   LLVMTypeRef float_vec_type = lp_build_vec_type(gallivm, f32_type);
-
-   /* Constants */
-   LLVMValueRef i32_13 = lp_build_const_int_vec(gallivm, i32_type, 13);
-   LLVMValueRef i32_16 = lp_build_const_int_vec(gallivm, i32_type, 16);
-   LLVMValueRef i32_mask_nosign = lp_build_const_int_vec(gallivm, i32_type, 0x7fff);
-   LLVMValueRef i32_was_infnan = lp_build_const_int_vec(gallivm, i32_type, 0x7bff);
-   LLVMValueRef i32_exp_infnan = lp_build_const_int_vec(gallivm, i32_type, 0xff << 23);
-   LLVMValueRef f32_magic = LLVMBuildBitCast(builder,
-                                             lp_build_const_int_vec(gallivm, i32_type, (254 - 15) << 23),
-                                             float_vec_type, "");
-
-   /* Convert int16 vector to int32 vector by zero ext */
-   LLVMValueRef h = LLVMBuildZExt(builder, src, int_vec_type, "");
-
-   /* Exponent / mantissa bits */
-   LLVMValueRef expmant = LLVMBuildAnd(builder, i32_mask_nosign, h, "");
-   LLVMValueRef shifted = LLVMBuildBitCast(builder, LLVMBuildShl(builder, expmant, i32_13, ""), float_vec_type, "");
-
-   /* Exponent adjust */
-   LLVMValueRef scaled = LLVMBuildBitCast(builder, LLVMBuildFMul(builder, shifted, f32_magic, ""), int_vec_type, "");
-
-   /* Make sure Inf/NaN survive */
-   LLVMValueRef b_wasinfnan = lp_build_compare(gallivm, i32_type, PIPE_FUNC_GREATER, expmant, i32_was_infnan);
-   LLVMValueRef infnanexp = LLVMBuildAnd(builder, b_wasinfnan, i32_exp_infnan, "");
-
-   /* Sign bit */
-   LLVMValueRef justsign = LLVMBuildXor(builder, h, expmant, "");
-   LLVMValueRef sign = LLVMBuildShl(builder, justsign, i32_16, "");
-
-   /* Combine result */
-   LLVMValueRef sign_inf = LLVMBuildOr(builder, sign, infnanexp, "");
-   LLVMValueRef final = LLVMBuildOr(builder, scaled, sign_inf, "");
-
-   /* Cast from int32 vector to float32 vector */
-   return LLVMBuildBitCast(builder, final, float_vec_type, "");
+   LLVMValueRef h;
+
+   if (util_cpu_caps.has_f16c && HAVE_LLVM >= 0x0301 &&
+       (src_length == 4 || src_length == 8)) {
+      const char *intrinsic = NULL;
+      if (src_length == 4) {
+         src = lp_build_pad_vector(gallivm, src, 8);
+         intrinsic = "llvm.x86.vcvtph2ps.128";
+      }
+      else {
+         intrinsic = "llvm.x86.vcvtph2ps.256";
+      }
+      return lp_build_intrinsic_unary(builder, intrinsic,
+                                      lp_build_vec_type(gallivm, f32_type), src);
+   }
+
+   /* Convert int16 vector to int32 vector by zero ext (might generate bad code) */
+   h = LLVMBuildZExt(builder, src, int_vec_type, "");
+   return lp_build_smallfloat_to_float(gallivm, f32_type, h, 10, 5, 0, true);
 }
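
The generic fallback above delegates the bit manipulation to
lp_build_smallfloat_to_float() with 10 mantissa bits and 5 exponent bits
(so bias 15). As a minimal scalar sketch of what that expansion computes,
assuming IEEE-754 binary32; half_to_float_scalar is a hypothetical name,
and the real code emits the same steps as LLVM IR on whole vectors:

   #include <stdint.h>
   #include <string.h>

   static float half_to_float_scalar(uint16_t h)
   {
      uint32_t sign = (uint32_t)(h & 0x8000) << 16; /* sign to bit 31 */
      uint32_t expmant = h & 0x7fff;                /* exponent + mantissa */
      uint32_t bits = (uint32_t)expmant << 13;      /* align to f32 layout */
      uint32_t magic = (uint32_t)(254 - 15) << 23;  /* 2^(127-15) rebias */
      float f, scale;

      memcpy(&f, &bits, sizeof f);
      memcpy(&scale, &magic, sizeof scale);
      f *= scale;                     /* rebias exponent, fixes up denormals */
      memcpy(&bits, &f, sizeof bits);
      if (expmant > 0x7bff)           /* input was Inf/NaN */
         bits |= 0xffu << 23;         /* force f32 Inf/NaN exponent */
      bits |= sign;
      memcpy(&f, &bits, sizeof f);
      return f;
   }

The 0x7bff threshold and the (254 - 15) magic constant are the same ones the
removed hand-rolled vector version used.
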
 
 
 /**
  * Converts float32 to int16 half-float
- * Note this can be performed in 1 instruction if vcvtps2ph exists (sse5 i think?)
+ * Note this can be performed in 1 instruction if vcvtps2ph exists (f16c/cvt16)
  * [llvm.x86.vcvtps2ph / _mm_cvtps_ph]
  *
  * @param src           value to convert
  *
- * ref http://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
- * ref https://gist.github.com/2156668
+ * Convert float32 to half floats, preserving Infs and NaNs,
+ * with rounding towards zero (trunc).
  */
 LLVMValueRef
 lp_build_float_to_half(struct gallivm_state *gallivm,
                        LLVMValueRef src)
 {
-   struct lp_type i32_type = lp_type_int_vec(32, 32 * LLVMGetVectorSize(LLVMTypeOf(src)));
-
    LLVMBuilderRef builder = gallivm->builder;
-   LLVMTypeRef int_vec_type = lp_build_vec_type(gallivm, i32_type);
-
-   struct lp_build_context bld;
-
+   LLVMTypeRef f32_vec_type = LLVMTypeOf(src);
+   unsigned length = LLVMGetTypeKind(f32_vec_type) == LLVMVectorTypeKind
+                   ? LLVMGetVectorSize(f32_vec_type) : 1;
+   struct lp_type i32_type = lp_type_int_vec(32, 32 * length);
+   struct lp_type i16_type = lp_type_int_vec(16, 16 * length);
    LLVMValueRef result;
 
-   lp_build_context_init(&bld, gallivm, i32_type);
+   if (util_cpu_caps.has_f16c && HAVE_LLVM >= 0x0301 &&
+       (length == 4 || length == 8)) {
+      struct lp_type i168_type = lp_type_int_vec(16, 16 * 8);
+      unsigned mode = 3; /* same as LP_BUILD_ROUND_TRUNCATE */
+      LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
+      const char *intrinsic = NULL;
+      if (length == 4) {
+         intrinsic = "llvm.x86.vcvtps2ph.128";
+      }
+      else {
+         intrinsic = "llvm.x86.vcvtps2ph.256";
+      }
+      result = lp_build_intrinsic_binary(builder, intrinsic,
+                                         lp_build_vec_type(gallivm, i168_type),
+                                         src, LLVMConstInt(i32t, mode, 0));
+      if (length == 4) {
+         result = lp_build_extract_range(gallivm, result, 0, 4);
+      }
+   }
 
-   /* Extra scope because lp_build_min needs a build context, le sigh */
-   {
-      /* Constants */
-      LLVMValueRef i32_13 = lp_build_const_int_vec(gallivm, i32_type, 13);
-      LLVMValueRef i32_16 = lp_build_const_int_vec(gallivm, i32_type, 16);
-      LLVMValueRef i32_mask_fabs = lp_build_const_int_vec(gallivm, i32_type, 0x7fffffff);
-      LLVMValueRef i32_f32infty = lp_build_const_int_vec(gallivm, i32_type, 0xff << 23);
-      LLVMValueRef i32_expinf = lp_build_const_int_vec(gallivm, i32_type, 0xe0 << 23);
-      LLVMValueRef i32_f16max = lp_build_const_int_vec(gallivm, i32_type, 0x8f << 23);
-      LLVMValueRef i32_magic = lp_build_const_int_vec(gallivm, i32_type, 0x0f << 23);
-
-      /* Cast from float32 to int32 */
-      LLVMValueRef f = LLVMBuildBitCast(builder, src, int_vec_type, "");
-
-      /* Remove sign */
-      LLVMValueRef fabs = LLVMBuildAnd(builder, i32_mask_fabs, f, "");
-
-      /* Magic conversion */
-      LLVMValueRef clamped = lp_build_min(&bld, i32_f16max, fabs);
-      LLVMValueRef scaled = LLVMBuildMul(builder, clamped, i32_magic, "");
-
-      /* Make sure Inf/NaN and unnormalised survive */
-      LLVMValueRef infnancase = LLVMBuildXor(builder, i32_expinf, fabs, "");
-      LLVMValueRef b_notnormal = lp_build_compare(gallivm, i32_type, PIPE_FUNC_GREATER, fabs, i32_f32infty);
-
-      /* Merge normal / unnormal case */
-      LLVMValueRef merge1 = LLVMBuildAnd(builder, infnancase, b_notnormal, "");
-      LLVMValueRef merge2 = LLVMBuildNot(builder, LLVMBuildAnd(builder, b_notnormal, scaled, ""), "");
-      LLVMValueRef merged = LLVMBuildOr(builder, merge1, merge2, "");
-      LLVMValueRef shifted = LLVMBuildLShr(builder, merged, i32_13, "");
-
-      /* Sign bit */
-      LLVMValueRef justsign = LLVMBuildXor(builder, f, fabs, "");
-      LLVMValueRef signshifted = LLVMBuildLShr(builder, justsign, i32_16, "");
-
-      /* Combine result */
-      result = LLVMBuildOr(builder, shifted, signshifted, "");
+   else {
+      result = lp_build_float_to_smallfloat(gallivm, i32_type, src, 10, 5, 0, true);
+      /* Convert int32 vector to int16 vector by trunc (might generate bad code) */
+      result = LLVMBuildTrunc(builder, result, lp_build_vec_type(gallivm, i16_type), "");
    }
 
-   /* Truncate from 32 bit to 16 bit */
-   i32_type.width = 16;
-   return LLVMBuildTrunc(builder, result, lp_build_vec_type(gallivm, i32_type), "");
+   /*
+    * Debugging code.
+    */
+   if (0) {
+     LLVMTypeRef i32t = LLVMInt32TypeInContext(gallivm->context);
+     LLVMTypeRef i16t = LLVMInt16TypeInContext(gallivm->context);
+     LLVMTypeRef f32t = LLVMFloatTypeInContext(gallivm->context);
+     LLVMValueRef ref_result = LLVMGetUndef(LLVMVectorType(i16t, length));
+     unsigned i;
+
+     LLVMTypeRef func_type = LLVMFunctionType(i16t, &f32t, 1, 0);
+     LLVMValueRef func = lp_build_const_int_pointer(gallivm, func_to_pointer((func_pointer)util_float_to_half));
+     func = LLVMBuildBitCast(builder, func, LLVMPointerType(func_type, 0), "util_float_to_half");
+
+     for (i = 0; i < length; ++i) {
+        LLVMValueRef index = LLVMConstInt(i32t, i, 0);
+        LLVMValueRef f32 = LLVMBuildExtractElement(builder, src, index, "");
+#if 0
+        /* XXX: not really supported by backends */
+        LLVMValueRef f16 = lp_build_intrinsic_unary(builder, "llvm.convert.to.fp16", i16t, f32);
+#else
+        LLVMValueRef f16 = LLVMBuildCall(builder, func, &f32, 1, "");
+#endif
+        ref_result = LLVMBuildInsertElement(builder, ref_result, f16, index, "");
+     }
+
+     lp_build_print_value(gallivm, "src  = ", src);
+     lp_build_print_value(gallivm, "llvm = ", result);
+     lp_build_print_value(gallivm, "util = ", ref_result);
+     lp_build_printf(gallivm, "\n");
+   }
+
+   return result;
 }
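
In intrinsics terms, the F16C fast path above amounts to the following
(a sketch, not mesa code; float4_to_half4 is a hypothetical name and needs
a compiler targeting F16C, e.g. gcc -mf16c). The imm8 value 3 selects
round-towards-zero, which is what the mode constant above encodes:

   #include <immintrin.h>

   static __m128i float4_to_half4(__m128 f)
   {
      /* 4 floats -> 4 half floats in the low 64 bits, upper lanes zero;
       * mirrors the extract_range back down to 4 elements above */
      return _mm_cvtps_ph(f, 3);   /* 3 == _MM_FROUND_TO_ZERO */
   }
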
 
 
@@ -255,6 +257,7 @@ lp_build_clamped_float_to_unsigned_norm(struct gallivm_state *gallivm,
       bias = (double)(1ULL << (mantissa - dst_width));
 
       res = LLVMBuildFMul(builder, src, lp_build_const_vec(gallivm, src_type, scale), "");
+      /* instead of fadd/and could (with sse2) just use lp_build_iround */
       res = LLVMBuildFAdd(builder, res, lp_build_const_vec(gallivm, src_type, bias), "");
       res = LLVMBuildBitCast(builder, res, int_vec_type, "");
       res = LLVMBuildAnd(builder, res,
@@ -263,17 +266,19 @@ lp_build_clamped_float_to_unsigned_norm(struct gallivm_state *gallivm,
    else if (dst_width == (mantissa + 1)) {
       /*
        * The destination width matches exactly what can be represented in
-       * floating point (i.e., mantissa + 1 bits). So do a straight
-       * multiplication followed by casting. No further rounding is necessary.
+       * floating point (i.e., mantissa + 1 bits). Even so, correct rounding
+       * still needs to be applied (only for numbers in [0.5, 1.0] would
+       * conversion using truncation after scaling be sufficient).
        */
-      double scale;
+      struct lp_build_context uf32_bld;
 
+      lp_build_context_init(&uf32_bld, gallivm, src_type);
       scale = (double)((1ULL << dst_width) - 1);
 
       res = LLVMBuildFMul(builder, src,
                           lp_build_const_vec(gallivm, src_type, scale), "");
-      res = LLVMBuildFPToSI(builder, res, int_vec_type, "");
+      res = lp_build_iround(&uf32_bld, res);
    }
    else {
       /*
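
For the dst_width <= mantissa case in the first hunk above, the fadd/and
pair works because adding the bias 2^(mantissa - dst_width) lines the scaled
value up so the rounded result lands in the low mantissa bits of the float.
A scalar sketch for unorm8, assuming IEEE-754 binary32 with round-to-nearest
(float_to_unorm8 is a hypothetical helper, not mesa code):

   #include <stdint.h>
   #include <string.h>

   static uint8_t float_to_unorm8(float x)   /* x assumed clamped to [0,1] */
   {
      float f = x * (255.0f / 256.0f);       /* scale = mask / (1 << 8) */
      uint32_t bits;

      f += 32768.0f;                         /* bias = 2^(23 - 8); this fadd
                                                performs the rounding */
      memcpy(&bits, &f, sizeof bits);
      return (uint8_t)(bits & 0xff);         /* and with mask = (1 << 8) - 1 */
   }
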
@@ -452,24 +457,22 @@ int lp_build_conv_auto(struct gallivm_state *gallivm,
        dst_type->width == 8)
    {
       /* Special case 4x4f --> 1x16ub */
-      if (src_type.length == 4 && util_cpu_caps.has_sse2)
+      if (src_type.length == 4 &&
+          util_cpu_caps.has_sse2)
       {
-         assert((num_srcs % 4) == 0);
-
-         num_dsts = num_srcs / 4;
-         dst_type->length = 16;
+         num_dsts = (num_srcs + 3) / 4;
+         dst_type->length = num_srcs * 4 >= 16 ? 16 : num_srcs * 4;
 
          lp_build_conv(gallivm, src_type, *dst_type, src, num_srcs, dst, num_dsts);
          return num_dsts;
       }
 
       /* Special case 2x8f --> 1x16ub */
-      if (src_type.length == 8 && util_cpu_caps.has_avx)
+      if (src_type.length == 8 &&
+          util_cpu_caps.has_avx)
       {
-         assert((num_srcs % 2) == 0);
-
-         num_dsts = num_srcs / 2;
-         dst_type->length = 16;
+         num_dsts = (num_srcs + 1) / 2;
+         dst_type->length = num_srcs * 8 >= 16 ? 16 : num_srcs * 8;
 
          lp_build_conv(gallivm, src_type, *dst_type, src, num_srcs, dst, num_dsts);
          return num_dsts;
@@ -524,7 +527,7 @@ lp_build_conv(struct gallivm_state *gallivm,
 
    num_tmps = num_srcs;
 
-   /* Special case 4x4f --> 1x16ub
+   /* Special case 4x4f --> 1x16ub, 2x4f -> 1x8ub, 1x4f -> 1x4ub
     */
    if (src_type.floating == 1 &&
       src_type.fixed == 0 &&
@@ -538,20 +541,23 @@ lp_build_conv(struct gallivm_state *gallivm,
       dst_type.sign == 0 &&
       dst_type.norm == 1 &&
       dst_type.width == 8 &&
-      dst_type.length == 16 &&
-      4 * num_dsts == num_srcs &&
+      ((dst_type.length == 16 && 4 * num_dsts == num_srcs) ||
+       (num_dsts == 1 && dst_type.length * num_srcs == 16 && num_srcs != 3)) &&
 
       util_cpu_caps.has_sse2)
    {
      struct lp_build_context bld;
-     struct lp_type int16_type = dst_type;
-     struct lp_type int32_type = dst_type;
+     struct lp_type int16_type, int32_type;
+     struct lp_type dst_type_ext = dst_type;
      LLVMValueRef const_255f;
      unsigned i, j;
 
      lp_build_context_init(&bld, gallivm, src_type);
 
+     dst_type_ext.length = 16;
+     int16_type = int32_type = dst_type_ext;
+
      int16_type.width *= 2;
      int16_type.length /= 2;
      int16_type.sign = 1;
@@ -565,21 +571,34 @@ lp_build_conv(struct gallivm_state *gallivm,
      for (i = 0; i < num_dsts; ++i, src += 4) {
         LLVMValueRef lo, hi;
 
-        for (j = 0; j < 4; ++j) {
+        for (j = 0; j < dst_type.length / 4; ++j) {
           tmp[j] = LLVMBuildFMul(builder, src[j], const_255f, "");
           tmp[j] = lp_build_iround(&bld, tmp[j]);
        }
 
+        if (num_srcs == 1) {
+           tmp[1] = tmp[0];
+        }
+
        /* relying on clamping behavior of sse2 intrinsics here */
        lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
-        hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
-        dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
+
+        if (num_srcs < 4) {
+           hi = lo;
+        }
+        else {
+           hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
+        }
+        dst[i] = lp_build_pack2(gallivm, int16_type, dst_type_ext, lo, hi);
+     }
+
+     if (num_srcs < 4) {
+        dst[0] = lp_build_extract_range(gallivm, dst[0], 0, dst_type.length);
      }
 
      return; 
   }
 
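The "clamping behavior of sse2 intrinsics" relied on above: _mm_packs_epi32
saturates int32 to int16 and _mm_packus_epi16 saturates int16 to uint8, so
out-of-range iround results need no explicit clamp. A standalone SSE2 sketch
of the packing stage (hypothetical helper; inputs are the four iround
results from values pre-scaled by 255.0):

   #include <emmintrin.h>

   static __m128i pack_4x4i32_to_16u8(__m128i a, __m128i b,
                                      __m128i c, __m128i d)
   {
      __m128i lo = _mm_packs_epi32(a, b);   /* 8 x int16, signed saturation */
      __m128i hi = _mm_packs_epi32(c, d);
      return _mm_packus_epi16(lo, hi);      /* 16 x uint8, unsigned saturation */
   }
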
-   /* Special case 2x8f --> 1x16ub
+   /* Special case 2x8f --> 1x16ub, 1x8f -> 1x8ub
    */
   else if (src_type.floating == 1 &&
      src_type.fixed == 0 &&
@@ -593,20 +612,23 @@ lp_build_conv(struct gallivm_state *gallivm,
      dst_type.sign == 0 &&
      dst_type.norm == 1 &&
      dst_type.width == 8 &&
-      dst_type.length == 16 &&
-      2 * num_dsts == num_srcs &&
+      ((dst_type.length == 16 && 2 * num_dsts == num_srcs) ||
+       (num_dsts == 1 && dst_type.length * num_srcs == 8)) &&
 
      util_cpu_caps.has_avx)
   {
     struct lp_build_context bld;
-     struct lp_type int16_type = dst_type;
-     struct lp_type int32_type = dst_type;
+     struct lp_type int16_type, int32_type;
+     struct lp_type dst_type_ext = dst_type;
     LLVMValueRef const_255f;
     unsigned i;
 
     lp_build_context_init(&bld, gallivm, src_type);
 
+     dst_type_ext.length = 16;
+     int16_type = int32_type = dst_type_ext;
+
     int16_type.width *= 2;
     int16_type.length /= 2;
     int16_type.sign = 1;
@@ -621,21 +643,30 @@ lp_build_conv(struct gallivm_state *gallivm,
        LLVMValueRef lo, hi, a, b;
 
        a = LLVMBuildFMul(builder, src[0], const_255f, "");
-        b = LLVMBuildFMul(builder, src[1], const_255f, "");
-
        a = lp_build_iround(&bld, a);
-        b = lp_build_iround(&bld, b);
-
        tmp[0] = lp_build_extract_range(gallivm, a, 0, 4);
        tmp[1] = lp_build_extract_range(gallivm, a, 4, 4);
-        tmp[2] = lp_build_extract_range(gallivm, b, 0, 4);
-        tmp[3] = lp_build_extract_range(gallivm, b, 4, 4);
-
        /* relying on clamping behavior of sse2 intrinsics here */
        lo = lp_build_pack2(gallivm, int32_type, int16_type, tmp[0], tmp[1]);
-        hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
-        dst[i] = lp_build_pack2(gallivm, int16_type, dst_type, lo, hi);
+
+        if (num_srcs == 1) {
+           hi = lo;
+        }
+        else {
+           b = LLVMBuildFMul(builder, src[1], const_255f, "");
+           b = lp_build_iround(&bld, b);
+           tmp[2] = lp_build_extract_range(gallivm, b, 0, 4);
+           tmp[3] = lp_build_extract_range(gallivm, b, 4, 4);
+           hi = lp_build_pack2(gallivm, int32_type, int16_type, tmp[2], tmp[3]);
+        }
+        dst[i] = lp_build_pack2(gallivm, int16_type, dst_type_ext, lo, hi);
+     }
+
+     if (num_srcs == 1) {
+        dst[0] = lp_build_extract_range(gallivm, dst[0], 0, dst_type.length);
      }
+
      return;
   }
 
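The extract_range calls above exist because plain AVX has no 256-bit integer
pack; each 8-wide iround result is split into 128-bit halves before the SSE2
packs. Roughly, in intrinsics (a sketch assuming AVX without AVX2;
pack_2x8f_to_16u8 is a hypothetical name, inputs already scaled by 255.0):

   #include <immintrin.h>

   static __m128i pack_2x8f_to_16u8(__m256 a, __m256 b)
   {
      __m256i ai = _mm256_cvtps_epi32(a);            /* round to nearest */
      __m256i bi = _mm256_cvtps_epi32(b);
      __m128i lo = _mm_packs_epi32(_mm256_castsi256_si128(ai),
                                   _mm256_extractf128_si256(ai, 1));
      __m128i hi = _mm_packs_epi32(_mm256_castsi256_si128(bi),
                                   _mm256_extractf128_si256(bi, 1));
      return _mm_packus_epi16(lo, hi);               /* saturates to [0,255] */
   }
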
@@ -714,7 +745,6 @@ lp_build_conv(struct gallivm_state *gallivm,
       }
       else {
          double dst_scale = lp_const_scale(dst_type);
-         LLVMTypeRef tmp_vec_type;
 
          if (dst_scale != 1.0) {
             LLVMValueRef scale = lp_build_const_vec(gallivm, tmp_type, dst_scale);
@@ -722,19 +752,38 @@ lp_build_conv(struct gallivm_state *gallivm,
                tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
          }
 
-         /* Use an equally sized integer for intermediate computations */
-         tmp_type.floating = FALSE;
-         tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
-         for(i = 0; i < num_tmps; ++i) {
+         /*
+          * These functions will use fptosi in some form, which won't work
+          * with a 32bit uint dst; enabling this assert causes lp_test_conv failures, though.
+          */
+         if (0)
+            assert(dst_type.sign || dst_type.width < 32);
+
+         if (dst_type.sign && dst_type.norm && !dst_type.fixed) {
+            struct lp_build_context bld;
+
+            lp_build_context_init(&bld, gallivm, tmp_type);
+            for(i = 0; i < num_tmps; ++i) {
+               tmp[i] = lp_build_iround(&bld, tmp[i]);
+            }
+            tmp_type.floating = FALSE;
+         }
+         else {
+            LLVMTypeRef tmp_vec_type;
+
+            tmp_type.floating = FALSE;
+            tmp_vec_type = lp_build_vec_type(gallivm, tmp_type);
+            for(i = 0; i < num_tmps; ++i) {
 #if 0
-            if(dst_type.sign)
-               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
-            else
-               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
+               if(dst_type.sign)
+                  tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
+               else
+                  tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
 #else
-            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
-            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
+               /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
+               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
 #endif
+            }
          }
       }
    }
@@ -832,6 +881,18 @@ lp_build_conv(struct gallivm_state *gallivm,
             for(i = 0; i < num_tmps; ++i)
                tmp[i] = LLVMBuildFMul(builder, tmp[i], scale, "");
          }
+
+         /* The formula above will produce values below -1.0 for the most
+          * negative value, but everything seems happy with that, hence
+          * disabled for now. */
+         if (0 && !src_type.fixed && src_type.norm && src_type.sign) {
+            struct lp_build_context bld;
+
+            lp_build_context_init(&bld, gallivm, dst_type);
+            for(i = 0; i < num_tmps; ++i) {
+               tmp[i] = lp_build_max(&bld, tmp[i],
+                                     lp_build_const_vec(gallivm, dst_type, -1.0f));
+            }
+         }
       }
    }
    else {
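
The disabled clamp in the last hunk addresses the snorm asymmetry: scaling
by 1/(2^(width-1) - 1) sends the most negative source value slightly below
-1.0. A scalar illustration for snorm8 (standalone example, not mesa code):

   #include <stdio.h>

   int main(void)
   {
      signed char s = -128;                  /* most negative snorm8 value */
      float f = (float)s * (1.0f / 127.0f);  /* -1.007874, i.e. below -1.0 */
      float clamped = f < -1.0f ? -1.0f : f; /* what lp_build_max would do */

      printf("raw = %f, clamped = %f\n", f, clamped);
      return 0;
   }
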