*/
-#include "pipe/p_state.h"
+#include "util/u_memory.h"
+#include "util/u_debug.h"
+#include "util/u_string.h"
+#include "util/u_cpu_detect.h"
+#include "lp_bld_type.h"
+#include "lp_bld_const.h"
+#include "lp_bld_intr.h"
+#include "lp_bld_logic.h"
#include "lp_bld_arit.h"
-LLVMValueRef
-lp_build_const_aos(LLVMTypeRef type,
- double r, double g, double b, double a,
- const unsigned char *swizzle)
-{
- const unsigned char default_swizzle[4] = {0, 1, 2, 3};
- LLVMTypeRef elem_type;
- unsigned num_elems;
- unsigned elem_width;
- LLVMValueRef elems[LP_MAX_VECTOR_SIZE];
- double scale;
- unsigned i;
-
- num_elems = LLVMGetVectorSize(type);
- assert(num_elems % 4 == 0);
- assert(num_elems < LP_MAX_VECTOR_SIZE);
-
- elem_type = LLVMGetElementType(type);
+/**
+ * Generate min(a, b)
+ * No checks for special case values of a or b = 1 or 0 are done.
+ */
+static LLVMValueRef
+lp_build_min_simple(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const struct lp_type type = bld->type;
+ const char *intrinsic = NULL;
+ LLVMValueRef cond;
- if(swizzle == NULL)
- swizzle = default_swizzle;
+ /* TODO: optimize the constant case */
- switch(LLVMGetTypeKind(elem_type)) {
- case LLVMFloatTypeKind:
- for(i = 0; i < num_elems; i += 4) {
- elems[i + swizzle[0]] = LLVMConstReal(elem_type, r);
- elems[i + swizzle[1]] = LLVMConstReal(elem_type, g);
- elems[i + swizzle[2]] = LLVMConstReal(elem_type, b);
- elems[i + swizzle[3]] = LLVMConstReal(elem_type, a);
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(type.width * type.length == 128) {
+ if(type.floating) {
+ if(type.width == 32)
+ intrinsic = "llvm.x86.sse.min.ps";
+ if(type.width == 64)
+ intrinsic = "llvm.x86.sse2.min.pd";
}
- break;
-
- case LLVMIntegerTypeKind:
- elem_width = LLVMGetIntTypeWidth(elem_type);
- assert(elem_width <= 32);
- scale = (double)((1 << elem_width) - 1);
- for(i = 0; i < num_elems; i += 4) {
- elems[i + swizzle[0]] = LLVMConstInt(elem_type, r*scale + 0.5, 0);
- elems[i + swizzle[1]] = LLVMConstInt(elem_type, g*scale + 0.5, 0);
- elems[i + swizzle[2]] = LLVMConstInt(elem_type, b*scale + 0.5, 0);
- elems[i + swizzle[3]] = LLVMConstInt(elem_type, a*scale + 0.5, 0);
+ else {
+ if(type.width == 8 && !type.sign)
+ intrinsic = "llvm.x86.sse2.pminu.b";
+ if(type.width == 8 && type.sign)
+ intrinsic = "llvm.x86.sse41.pminsb";
+ if(type.width == 16 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pminuw";
+ if(type.width == 16 && type.sign)
+ intrinsic = "llvm.x86.sse2.pmins.w";
+ if(type.width == 32 && !type.sign)
+ intrinsic = "llvm.x86.sse41.pminud";
+ if(type.width == 32 && type.sign)
+ intrinsic = "llvm.x86.sse41.pminsd";
}
- break;
-
- default:
- assert(0);
- return LLVMGetUndef(type);
}
+#endif
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
- return LLVMConstVector(elems, num_elems);
+ cond = lp_build_cmp(bld, PIPE_FUNC_LESS, a, b);
+ return lp_build_select(bld, cond, a, b);
}
-
+
+/**
+ * Generate max(a, b)
+ * No checks for special case values of a or b = 1 or 0 are done.
+ */
static LLVMValueRef
-lp_build_intrinsic_binary(LLVMBuilderRef builder,
- const char *name,
- LLVMValueRef a,
- LLVMValueRef b)
-{
- LLVMModuleRef module = LLVMGetGlobalParent(LLVMGetBasicBlockParent(LLVMGetInsertBlock(builder)));
- LLVMValueRef function;
- LLVMValueRef args[2];
-
- function = LLVMGetNamedFunction(module, name);
- if(!function) {
- LLVMTypeRef type = LLVMTypeOf(a);
- LLVMTypeRef arg_types[2];
- arg_types[0] = type;
- arg_types[1] = type;
- function = LLVMAddFunction(module, name, LLVMFunctionType(type, arg_types, 2, 0));
- LLVMSetFunctionCallConv(function, LLVMCCallConv);
- LLVMSetLinkage(function, LLVMExternalLinkage);
+lp_build_max_simple(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ const struct lp_type type = bld->type;
+ const char *intrinsic = NULL;
+ LLVMValueRef cond;
+
+ /* TODO: optimize the constant case */
+
+ if(type.width * type.length == 128) {
+ if(type.floating) {
+ if(type.width == 32 && util_cpu_caps.has_sse)
+ intrinsic = "llvm.x86.sse.max.ps";
+ if(type.width == 64 && util_cpu_caps.has_sse2)
+ intrinsic = "llvm.x86.sse2.max.pd";
+ }
+ else {
+ if(type.width == 8 && !type.sign && util_cpu_caps.has_sse2)
+ intrinsic = "llvm.x86.sse2.pmaxu.b";
+ if(type.width == 8 && type.sign && util_cpu_caps.has_sse4_1)
+ intrinsic = "llvm.x86.sse41.pmaxsb";
+ if(type.width == 16 && !type.sign && util_cpu_caps.has_sse4_1)
+ intrinsic = "llvm.x86.sse41.pmaxuw";
+ if(type.width == 16 && type.sign && util_cpu_caps.has_sse2)
+ intrinsic = "llvm.x86.sse2.pmaxs.w";
+ if(type.width == 32 && !type.sign && util_cpu_caps.has_sse4_1)
+ intrinsic = "llvm.x86.sse41.pmaxud";
+ if(type.width == 32 && type.sign && util_cpu_caps.has_sse4_1)
+ intrinsic = "llvm.x86.sse41.pmaxsd";
+ }
}
- assert(LLVMIsDeclaration(function));
- args[0] = a;
- args[1] = b;
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
- return LLVMBuildCall(builder, function, args, 2, "");
+ cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, b);
+ return lp_build_select(bld, cond, a, b);
}
+/**
+ * Generate 1 - a, or ~a depending on bld->type.
+ */
LLVMValueRef
-lp_build_add(LLVMBuilderRef builder,
+lp_build_comp(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ if(a == bld->one)
+ return bld->zero;
+ if(a == bld->zero)
+ return bld->one;
+
+ if(type.norm && !type.floating && !type.fixed && !type.sign) {
+ if(LLVMIsConstant(a))
+ return LLVMConstNot(a);
+ else
+ return LLVMBuildNot(bld->builder, a, "");
+ }
+
+ if(LLVMIsConstant(a))
+ return LLVMConstSub(bld->one, a);
+ else
+ return LLVMBuildSub(bld->builder, bld->one, a, "");
+}
+
+
+/**
+ * Generate a + b
+ */
+LLVMValueRef
+lp_build_add(struct lp_build_context *bld,
LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero)
+ LLVMValueRef b)
{
- if(a == zero)
+ const struct lp_type type = bld->type;
+ LLVMValueRef res;
+
+ if(a == bld->zero)
return b;
- else if(b == zero)
+ if(b == bld->zero)
return a;
- else if(LLVMIsConstant(a) && LLVMIsConstant(b))
- return LLVMConstAdd(a, b);
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(bld->type.norm) {
+ const char *intrinsic = NULL;
+
+ if(a == bld->one || b == bld->one)
+ return bld->one;
+
+ if(util_cpu_caps.has_sse2 &&
+ type.width * type.length == 128 &&
+ !type.floating && !type.fixed) {
+ if(type.width == 8)
+ intrinsic = type.sign ? "llvm.x86.sse2.padds.b" : "llvm.x86.sse2.paddus.b";
+ if(type.width == 16)
+ intrinsic = type.sign ? "llvm.x86.sse2.padds.w" : "llvm.x86.sse2.paddus.w";
+ }
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+ }
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ res = LLVMConstAdd(a, b);
else
- return LLVMBuildAdd(builder, a, b, "");
+ res = LLVMBuildAdd(bld->builder, a, b, "");
+
+ /* clamp to ceiling of 1.0 */
+ if(bld->type.norm && (bld->type.floating || bld->type.fixed))
+ res = lp_build_min_simple(bld, res, bld->one);
+
+ /* XXX clamp to floor of -1 or 0??? */
+
+ return res;
}
+/**
+ * Generate a - b
+ */
LLVMValueRef
-lp_build_sub(LLVMBuilderRef builder,
+lp_build_sub(struct lp_build_context *bld,
LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero)
+ LLVMValueRef b)
{
- if(b == zero)
+ const struct lp_type type = bld->type;
+ LLVMValueRef res;
+
+ if(b == bld->zero)
return a;
- else if(a == b)
- return zero;
- else if(LLVMIsConstant(a) && LLVMIsConstant(b))
- return LLVMConstSub(a, b);
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+ if(a == b)
+ return bld->zero;
+
+ if(bld->type.norm) {
+ const char *intrinsic = NULL;
+
+ if(b == bld->one)
+ return bld->zero;
+
+ if(util_cpu_caps.has_sse2 &&
+ type.width * type.length == 128 &&
+ !type.floating && !type.fixed) {
+ if(type.width == 8)
+ intrinsic = type.sign ? "llvm.x86.sse2.psubs.b" : "llvm.x86.sse2.psubus.b";
+ if(type.width == 16)
+ intrinsic = type.sign ? "llvm.x86.sse2.psubs.w" : "llvm.x86.sse2.psubus.w";
+ }
+
+ if(intrinsic)
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, lp_build_vec_type(bld->type), a, b);
+ }
+
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ res = LLVMConstSub(a, b);
else
- return LLVMBuildSub(builder, a, b, "");
+ res = LLVMBuildSub(bld->builder, a, b, "");
+
+ if(bld->type.norm && (bld->type.floating || bld->type.fixed))
+ res = lp_build_max_simple(bld, res, bld->zero);
+
+ return res;
+}
+
+
+/**
+ * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
+ */
+static LLVMValueRef
+lp_build_unpack_shuffle(unsigned n, unsigned lo_hi)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i, j;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+ assert(lo_hi < 2);
+
+ for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
+ elems[i + 0] = LLVMConstInt(LLVMInt32Type(), 0 + j, 0);
+ elems[i + 1] = LLVMConstInt(LLVMInt32Type(), n + j, 0);
+ }
+
+ return LLVMConstVector(elems, n);
+}
+
+
+/**
+ * Build constant int vector of width 'n' and value 'c'.
+ */
+static LLVMValueRef
+lp_build_const_vec(LLVMTypeRef type, unsigned n, long long c)
+{
+ LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
+ unsigned i;
+
+ assert(n <= LP_MAX_VECTOR_LENGTH);
+
+ for(i = 0; i < n; ++i)
+ elems[i] = LLVMConstInt(type, c, 0);
+
+ return LLVMConstVector(elems, n);
+}
+
+
/**
 * Normalized 8bit multiplication.
 *
 * - alpha plus one
 *
 *     makes the following approximation to the division (Sree)
 *
 *       a*b/255 ~= (a*(b + 1)) / 256
 *
 *     which is the fastest method that satisfies the following OpenGL criteria
 *
 *       0*0 = 0 and 255*255 = 255
 *
 * - geometric series
 *
 *     takes the geometric series approximation to the division
 *
 *       t/255 = (t >> 8) + (t >> 16) + (t >> 24) ..
 *
 *     in this case just the first two terms to fit in 16bit arithmetic
 *
 *       t/255 ~= (t + (t >> 8)) >> 8
 *
 *     note that just by itself it doesn't satisfies the OpenGL criteria, as
 *     255*255 = 254, so the special case b = 255 must be accounted or roundoff
 *     must be used
 *
 * - geometric series plus rounding
 *
 *     when using a geometric series division instead of truncating the result
 *     use roundoff in the approximation (Jim Blinn)
 *
 *       t/255 ~= (t + (t >> 8) + 0x80) >> 8
 *
 *     achieving the exact results
 *
 * @sa Alvy Ray Smith, Image Compositing Fundamentals, Tech Memo 4, Aug 15, 1995,
 *     ftp://ftp.alvyray.com/Acrobat/4_Comp.pdf
 * @sa Michael Herf, The "double blend trick", May 2000,
 *     http://www.stereopsis.com/doubleblend.html
 */
static LLVMValueRef
lp_build_mul_u8n(LLVMBuilderRef builder,
                 LLVMValueRef a, LLVMValueRef b)
{
   /* NOTE(review): these constants are cached across calls, which assumes a
    * single global LLVM context for the process lifetime — confirm. */
   static LLVMValueRef c01 = NULL;
   static LLVMValueRef c08 = NULL;   /* shift amount: 8 */
   static LLVMValueRef c80 = NULL;   /* rounding bias: 0x80 */
   LLVMValueRef ab;

   if(!c01) c01 = lp_build_const_vec(LLVMInt16Type(), 8, 0x01);
   if(!c08) c08 = lp_build_const_vec(LLVMInt16Type(), 8, 0x08);
   if(!c80) c80 = lp_build_const_vec(LLVMInt16Type(), 8, 0x80);

#if 0

   /* a*b/255 ~= (a*(b + 1)) / 256 */
   b = LLVMBuildAdd(builder, b, c01, "");
   ab = LLVMBuildMul(builder, a, b, "");

#else

   /* t/255 ~= (t + (t >> 8) + 0x80) >> 8 */
   ab = LLVMBuildMul(builder, a, b, "");
   ab = LLVMBuildAdd(builder, ab, LLVMBuildLShr(builder, ab, c08, ""), "");
   ab = LLVMBuildAdd(builder, ab, c80, "");

#endif

   ab = LLVMBuildLShr(builder, ab, c08, "");

   return ab;
}
/**
 * Generate a * b
 *
 * For unsigned-normalized 8-bit x 16-lane vectors on SSE2 this widens the
 * operands to 16 bits, performs the normalized multiply in two halves via
 * lp_build_mul_u8n, and packs the result back to 8-bit lanes.
 */
LLVMValueRef
lp_build_mul(struct lp_build_context *bld,
             LLVMValueRef a,
             LLVMValueRef b)
{
   const struct lp_type type = bld->type;

   /* Trivial operands: zero absorbs, one is identity, undef propagates */
   if(a == bld->zero)
      return bld->zero;
   if(a == bld->one)
      return b;
   if(b == bld->zero)
      return bld->zero;
   if(b == bld->one)
      return a;
   if(a == bld->undef || b == bld->undef)
      return bld->undef;

   if(!type.floating && !type.fixed && type.norm) {
      if(util_cpu_caps.has_sse2 && type.width == 8 && type.length == 16) {
         LLVMTypeRef i16x8 = LLVMVectorType(LLVMInt16Type(), 8);
         LLVMTypeRef i8x16 = LLVMVectorType(LLVMInt8Type(), 16);
         /* NOTE(review): shuffle masks are cached across calls; assumes a
          * single global LLVM context for the process lifetime — confirm. */
         static LLVMValueRef ml = NULL;
         static LLVMValueRef mh = NULL;
         LLVMValueRef al, ah, bl, bh;
         LLVMValueRef abl, abh;
         LLVMValueRef ab;

         if(!ml) ml = lp_build_unpack_shuffle(16, 0);
         if(!mh) mh = lp_build_unpack_shuffle(16, 1);

         /* PUNPCKLBW, PUNPCKHBW: interleave each half with zeros,
          * widening 8-bit lanes to 16 bits */
         al = LLVMBuildShuffleVector(bld->builder, a, bld->zero, ml, "");
         bl = LLVMBuildShuffleVector(bld->builder, b, bld->zero, ml, "");
         ah = LLVMBuildShuffleVector(bld->builder, a, bld->zero, mh, "");
         bh = LLVMBuildShuffleVector(bld->builder, b, bld->zero, mh, "");

         /* NOP: reinterpret the 16 x i8 as 8 x i16 */
         al = LLVMBuildBitCast(bld->builder, al, i16x8, "");
         bl = LLVMBuildBitCast(bld->builder, bl, i16x8, "");
         ah = LLVMBuildBitCast(bld->builder, ah, i16x8, "");
         bh = LLVMBuildBitCast(bld->builder, bh, i16x8, "");

         /* PMULLW, PSRLW, PADDW: normalized multiply in 16-bit lanes */
         abl = lp_build_mul_u8n(bld->builder, al, bl);
         abh = lp_build_mul_u8n(bld->builder, ah, bh);

         /* PACKUSWB: narrow both halves back to 8-bit lanes */
         ab = lp_build_intrinsic_binary(bld->builder, "llvm.x86.sse2.packuswb.128" , i16x8, abl, abh);

         /* NOP: reinterpret result as 16 x i8 */
         ab = LLVMBuildBitCast(bld->builder, ab, i8x16, "");

         return ab;
      }

      /* FIXME: normalized multiply only implemented for the SSE2 8x16 case */
      assert(0);
   }

   if(LLVMIsConstant(a) && LLVMIsConstant(b))
      return LLVMConstMul(a, b);

   return LLVMBuildMul(bld->builder, a, b, "");
}
+/**
+ * Generate a / b
+ */
LLVMValueRef
-lp_build_min(LLVMBuilderRef builder,
+lp_build_div(struct lp_build_context *bld,
LLVMValueRef a,
LLVMValueRef b)
{
- /* TODO: optimize the constant case */
+ const struct lp_type type = bld->type;
-#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(a == bld->zero)
+ return bld->zero;
+ if(a == bld->one)
+ return lp_build_rcp(bld, b);
+ if(b == bld->zero)
+ return bld->undef;
+ if(b == bld->one)
+ return a;
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
- return lp_build_intrinsic_binary(builder, "llvm.x86.sse.min.ps", a, b);
+ if(LLVMIsConstant(a) && LLVMIsConstant(b))
+ return LLVMConstFDiv(a, b);
-#else
+ if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
+ return lp_build_mul(bld, a, lp_build_rcp(bld, b));
+
+ return LLVMBuildFDiv(bld->builder, a, b, "");
+}
- LLVMValueRef cond = LLVMBuildFCmp(values->builder, LLVMRealULT, a, b, "");
- return LLVMBuildSelect(values->builder, cond, a, b, "");
-#endif
+LLVMValueRef
+lp_build_lerp(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef v0,
+ LLVMValueRef v1)
+{
+ return lp_build_add(bld, v0, lp_build_mul(bld, x, lp_build_sub(bld, v1, v0)));
+}
+
+
+LLVMValueRef
+lp_build_lerp_2d(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y,
+ LLVMValueRef v00,
+ LLVMValueRef v01,
+ LLVMValueRef v10,
+ LLVMValueRef v11)
+{
+ LLVMValueRef v0 = lp_build_lerp(bld, x, v00, v01);
+ LLVMValueRef v1 = lp_build_lerp(bld, x, v10, v11);
+ return lp_build_lerp(bld, y, v0, v1);
}
+/**
+ * Generate min(a, b)
+ * Do checks for special cases.
+ */
LLVMValueRef
-lp_build_max(LLVMBuilderRef builder,
+lp_build_min(struct lp_build_context *bld,
LLVMValueRef a,
LLVMValueRef b)
{
- /* TODO: optimize the constant case */
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
-#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+ if(a == b)
+ return a;
- return lp_build_intrinsic_binary(builder, "llvm.x86.sse.max.ps", a, b);
+ if(bld->type.norm) {
+ if(a == bld->zero || b == bld->zero)
+ return bld->zero;
+ if(a == bld->one)
+ return b;
+ if(b == bld->one)
+ return a;
+ }
-#else
+ return lp_build_min_simple(bld, a, b);
+}
- LLVMValueRef cond = LLVMBuildFCmp(values->builder, LLVMRealULT, a, b, "");
- return LLVMBuildSelect(values->builder, cond, b, a, "");
-#endif
+/**
+ * Generate max(a, b)
+ * Do checks for special cases.
+ */
+LLVMValueRef
+lp_build_max(struct lp_build_context *bld,
+ LLVMValueRef a,
+ LLVMValueRef b)
+{
+ if(a == bld->undef || b == bld->undef)
+ return bld->undef;
+
+ if(a == b)
+ return a;
+
+ if(bld->type.norm) {
+ if(a == bld->one || b == bld->one)
+ return bld->one;
+ if(a == bld->zero)
+ return b;
+ if(b == bld->zero)
+ return a;
+ }
+
+ return lp_build_max_simple(bld, a, b);
}
+/**
+ * Generate abs(a)
+ */
LLVMValueRef
-lp_build_add_sat(LLVMBuilderRef builder,
- LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero,
- LLVMValueRef one)
+lp_build_abs(struct lp_build_context *bld,
+ LLVMValueRef a)
{
- if(a == zero)
- return b;
- else if(b == zero)
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+
+ if(!type.sign)
return a;
- else if(a == one || b == one)
- return one;
- else
- return lp_build_min(builder, lp_build_add(builder, a, b, zero), one);
+
+ if(type.floating) {
+ /* Mask out the sign bit */
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+ LLVMValueRef mask = lp_build_int_const_scalar(type, ((unsigned long long)1 << type.width) - 1);
+ a = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
+ a = LLVMBuildAnd(bld->builder, a, mask, "");
+ a = LLVMBuildBitCast(bld->builder, a, vec_type, "");
+ return a;
+ }
+
+ if(type.width*type.length == 128 && util_cpu_caps.has_ssse3) {
+ switch(type.width) {
+ case 8:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.b.128", vec_type, a);
+ case 16:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.w.128", vec_type, a);
+ case 32:
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.ssse3.pabs.d.128", vec_type, a);
+ }
+ }
+
+ return lp_build_max(bld, a, LLVMBuildNeg(bld->builder, a, ""));
}
+
LLVMValueRef
-lp_build_sub_sat(LLVMBuilderRef builder,
- LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero,
- LLVMValueRef one)
+lp_build_sgn(struct lp_build_context *bld,
+ LLVMValueRef a)
{
- if(b == zero)
- return a;
- else if(b == one)
- return zero;
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ LLVMValueRef cond;
+ LLVMValueRef res;
+
+ /* Handle non-zero case */
+ if(!type.sign) {
+ /* if not zero then sign must be positive */
+ res = bld->one;
+ }
+ else if(type.floating) {
+ /* Take the sign bit and add it to 1 constant */
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+ LLVMValueRef mask = lp_build_int_const_scalar(type, (unsigned long long)1 << (type.width - 1));
+ LLVMValueRef sign;
+ LLVMValueRef one;
+ sign = LLVMBuildBitCast(bld->builder, a, int_vec_type, "");
+ sign = LLVMBuildAnd(bld->builder, sign, mask, "");
+ one = LLVMConstBitCast(bld->one, int_vec_type);
+ res = LLVMBuildOr(bld->builder, sign, one, "");
+ res = LLVMBuildBitCast(bld->builder, res, vec_type, "");
+ }
else
- return lp_build_max(builder, lp_build_sub(builder, a, b, zero), zero);
+ {
+ LLVMValueRef minus_one = lp_build_const_scalar(type, -1.0);
+ cond = lp_build_cmp(bld, PIPE_FUNC_GREATER, a, bld->zero);
+ res = lp_build_select(bld, cond, bld->one, minus_one);
+ }
+
+ /* Handle zero */
+ cond = lp_build_cmp(bld, PIPE_FUNC_EQUAL, a, bld->zero);
+ res = lp_build_select(bld, cond, bld->zero, bld->one);
+
+ return res;
+}
+
+
/**
 * Rounding-mode immediate values for the SSE4.1 ROUNDPS/ROUNDPD
 * intrinsics used by lp_build_round_sse41().
 */
enum lp_build_round_sse41_mode
{
   LP_BUILD_ROUND_SSE41_NEAREST = 0,    /* round to nearest */
   LP_BUILD_ROUND_SSE41_FLOOR = 1,      /* round toward negative infinity */
   LP_BUILD_ROUND_SSE41_CEIL = 2,       /* round toward positive infinity */
   LP_BUILD_ROUND_SSE41_TRUNCATE = 3    /* round toward zero */
};
+
+
+static INLINE LLVMValueRef
+lp_build_round_sse41(struct lp_build_context *bld,
+ LLVMValueRef a,
+ enum lp_build_round_sse41_mode mode)
+{
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ const char *intrinsic;
+
+ assert(type.floating);
+ assert(type.width*type.length == 128);
+
+ switch(type.width) {
+ case 32:
+ intrinsic = "llvm.x86.sse41.round.ps";
+ break;
+ case 64:
+ intrinsic = "llvm.x86.sse41.round.pd";
+ break;
+ default:
+ assert(0);
+ return bld->undef;
+ }
+
+ return lp_build_intrinsic_binary(bld->builder, intrinsic, vec_type, a,
+ LLVMConstInt(LLVMInt32Type(), mode, 0));
}
+
LLVMValueRef
-lp_build_min_sat(LLVMBuilderRef builder,
- LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero,
- LLVMValueRef one)
-{
- if(a == zero || b == zero)
- return zero;
- else if(a == one)
- return b;
- else if(b == one)
- return a;
- else
- return lp_build_min(builder, a, b);
+lp_build_round(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ assert(type.floating);
+
+ if(util_cpu_caps.has_sse4_1)
+ return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_NEAREST);
+
+ /* FIXME */
+ assert(0);
+ return bld->undef;
}
LLVMValueRef
-lp_build_max_sat(LLVMBuilderRef builder,
- LLVMValueRef a,
- LLVMValueRef b,
- LLVMValueRef zero,
- LLVMValueRef one)
+lp_build_floor(struct lp_build_context *bld,
+ LLVMValueRef a)
{
- if(a == zero)
- return b;
- else if(b == zero)
- return a;
- else if(a == one || b == one)
- return one;
+ const struct lp_type type = bld->type;
+
+ assert(type.floating);
+
+ if(util_cpu_caps.has_sse4_1)
+ return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_FLOOR);
+
+ /* FIXME */
+ assert(0);
+ return bld->undef;
+}
+
+
+LLVMValueRef
+lp_build_ceil(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ assert(type.floating);
+
+ if(util_cpu_caps.has_sse4_1)
+ return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_CEIL);
+
+ /* FIXME */
+ assert(0);
+ return bld->undef;
+}
+
+
+LLVMValueRef
+lp_build_trunc(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ assert(type.floating);
+
+ if(util_cpu_caps.has_sse4_1)
+ return lp_build_round_sse41(bld, a, LP_BUILD_ROUND_SSE41_TRUNCATE);
+
+ /* FIXME */
+ assert(0);
+ return bld->undef;
+}
+
+
+/**
+ * Convert to integer, through whichever rounding method that's fastest,
+ * typically truncating to zero.
+ */
+LLVMValueRef
+lp_build_int(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+
+ assert(type.floating);
+
+ return LLVMBuildFPToSI(bld->builder, a, int_vec_type, "");
+}
+
+
+LLVMValueRef
+lp_build_ifloor(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ a = lp_build_floor(bld, a);
+ a = lp_build_int(bld, a);
+ return a;
+}
+
+
/**
 * Generate sqrt(a) via the llvm.sqrt.* intrinsic.
 */
LLVMValueRef
lp_build_sqrt(struct lp_build_context *bld,
              LLVMValueRef a)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   char intrinsic[32];

   /* TODO: optimize the constant case */

   assert(type.floating);
   /* e.g. "llvm.sqrt.v4f32" for a 4 x 32-bit float vector */
   util_snprintf(intrinsic, sizeof intrinsic, "llvm.sqrt.v%uf%u", type.length, type.width);

   return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
}
+
+
+LLVMValueRef
+lp_build_rcp(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ if(a == bld->zero)
+ return bld->undef;
+ if(a == bld->one)
+ return bld->one;
+ if(a == bld->undef)
+ return bld->undef;
+
+ assert(type.floating);
+
+ if(LLVMIsConstant(a))
+ return LLVMConstFDiv(bld->one, a);
+
+ if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
+ /* FIXME: improve precision */
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rcp.ps", lp_build_vec_type(type), a);
+
+ return LLVMBuildFDiv(bld->builder, bld->one, a, "");
+}
+
+
+/**
+ * Generate 1/sqrt(a)
+ */
+LLVMValueRef
+lp_build_rsqrt(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+
+ assert(type.floating);
+
+ if(util_cpu_caps.has_sse && type.width == 32 && type.length == 4)
+ return lp_build_intrinsic_unary(bld->builder, "llvm.x86.sse.rsqrt.ps", lp_build_vec_type(type), a);
+
+ return lp_build_rcp(bld, lp_build_sqrt(bld, a));
+}
+
+
+/**
+ * Generate cos(a)
+ */
+LLVMValueRef
+lp_build_cos(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ char intrinsic[32];
+
+ /* TODO: optimize the constant case */
+
+ assert(type.floating);
+ util_snprintf(intrinsic, sizeof intrinsic, "llvm.cos.v%uf%u", type.length, type.width);
+
+ return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
+}
+
+
+/**
+ * Generate sin(a)
+ */
+LLVMValueRef
+lp_build_sin(struct lp_build_context *bld,
+ LLVMValueRef a)
+{
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ char intrinsic[32];
+
+ /* TODO: optimize the constant case */
+
+ assert(type.floating);
+ util_snprintf(intrinsic, sizeof intrinsic, "llvm.sin.v%uf%u", type.length, type.width);
+
+ return lp_build_intrinsic_unary(bld->builder, intrinsic, vec_type, a);
+}
+
+
+/**
+ * Generate pow(x, y)
+ */
+LLVMValueRef
+lp_build_pow(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef y)
+{
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x) && LLVMIsConstant(y))
+ debug_printf("%s: inefficient/imprecise constant arithmetic\n",
+ __FUNCTION__);
+
+ return lp_build_exp2(bld, lp_build_mul(bld, lp_build_log2(bld, x), y));
+}
+
+
+/**
+ * Generate exp(x)
+ */
+LLVMValueRef
+lp_build_exp(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ /* log2(e) = 1/log(2) */
+ LLVMValueRef log2e = lp_build_const_scalar(bld->type, 1.4426950408889634);
+
+ return lp_build_mul(bld, log2e, lp_build_exp2(bld, x));
+}
+
+
+/**
+ * Generate log(x)
+ */
+LLVMValueRef
+lp_build_log(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ /* log(2) */
+ LLVMValueRef log2 = lp_build_const_scalar(bld->type, 1.4426950408889634);
+
+ return lp_build_mul(bld, log2, lp_build_exp2(bld, x));
+}
+
+
+#define EXP_POLY_DEGREE 3
+#define LOG_POLY_DEGREE 5
+
+
+/**
+ * Generate polynomial.
+ * Ex: x^2 * coeffs[0] + x * coeffs[1] + coeffs[2].
+ */
+static LLVMValueRef
+lp_build_polynomial(struct lp_build_context *bld,
+ LLVMValueRef x,
+ const double *coeffs,
+ unsigned num_coeffs)
+{
+ const struct lp_type type = bld->type;
+ LLVMValueRef res = NULL;
+ unsigned i;
+
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x))
+ debug_printf("%s: inefficient/imprecise constant arithmetic\n",
+ __FUNCTION__);
+
+ for (i = num_coeffs; i--; ) {
+ LLVMValueRef coeff = lp_build_const_scalar(type, coeffs[i]);
+ if(res)
+ res = lp_build_add(bld, coeff, lp_build_mul(bld, x, res));
+ else
+ res = coeff;
+ }
+
+ if(res)
+ return res;
else
- return lp_build_max(builder, a, b);
+ return bld->undef;
+}
+
+
/**
 * Minimax polynomial fit of 2**x, in range [-0.5, 0.5[
 * Coefficients are in ascending power order; the degree used is selected
 * at compile time via EXP_POLY_DEGREE.
 */
const double lp_build_exp2_polynomial[] = {
#if EXP_POLY_DEGREE == 5
   9.9999994e-1, 6.9315308e-1, 2.4015361e-1, 5.5826318e-2, 8.9893397e-3, 1.8775767e-3
#elif EXP_POLY_DEGREE == 4
   1.0000026, 6.9300383e-1, 2.4144275e-1, 5.2011464e-2, 1.3534167e-2
#elif EXP_POLY_DEGREE == 3
   9.9992520e-1, 6.9583356e-1, 2.2606716e-1, 7.8024521e-2
#elif EXP_POLY_DEGREE == 2
   1.0017247, 6.5763628e-1, 3.3718944e-1
#else
#error
#endif
};
+
+
/**
 * Approximate 2^x, optionally also returning the integer-part power and the
 * fractional remainder of the exponent.
 *
 * @param x                exponent vector; assumes 32-bit float type (asserted)
 * @param p_exp2_int_part  if non-NULL, receives 2^ipart as a float vector
 * @param p_frac_part      if non-NULL, receives x - ipart
 * @param p_exp2           if non-NULL, receives the full 2^x approximation
 */
void
lp_build_exp2_approx(struct lp_build_context *bld,
                     LLVMValueRef x,
                     LLVMValueRef *p_exp2_int_part,
                     LLVMValueRef *p_frac_part,
                     LLVMValueRef *p_exp2)
{
   const struct lp_type type = bld->type;
   LLVMTypeRef vec_type = lp_build_vec_type(type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
   LLVMValueRef ipart = NULL;
   LLVMValueRef fpart = NULL;
   LLVMValueRef expipart = NULL;
   LLVMValueRef expfpart = NULL;
   LLVMValueRef res = NULL;

   if(p_exp2_int_part || p_frac_part || p_exp2) {
      /* TODO: optimize the constant case */
      if(LLVMIsConstant(x))
         debug_printf("%s: inefficient/imprecise constant arithmetic\n",
                      __FUNCTION__);

      assert(type.floating && type.width == 32);

      /* Clamp x so the biased exponent built below stays in range */
      x = lp_build_min(bld, x, lp_build_const_scalar(type, 129.0));
      x = lp_build_max(bld, x, lp_build_const_scalar(type, -126.99999));

      /* ipart = int(x - 0.5) */
      ipart = LLVMBuildSub(bld->builder, x, lp_build_const_scalar(type, 0.5f), "");
      ipart = LLVMBuildFPToSI(bld->builder, ipart, int_vec_type, "");

      /* fpart = x - ipart */
      fpart = LLVMBuildSIToFP(bld->builder, ipart, vec_type, "");
      fpart = LLVMBuildSub(bld->builder, x, fpart, "");
   }

   if(p_exp2_int_part || p_exp2) {
      /* expipart = (float) (1 << ipart): build 2^ipart directly by placing
       * (ipart + 127) into the IEEE-754 single-precision exponent field */
      expipart = LLVMBuildAdd(bld->builder, ipart, lp_build_int_const_scalar(type, 127), "");
      expipart = LLVMBuildShl(bld->builder, expipart, lp_build_int_const_scalar(type, 23), "");
      expipart = LLVMBuildBitCast(bld->builder, expipart, vec_type, "");
   }

   if(p_exp2) {
      /* Polynomial approximation of 2^fpart on [-0.5, 0.5[ */
      expfpart = lp_build_polynomial(bld, fpart, lp_build_exp2_polynomial,
                                     Elements(lp_build_exp2_polynomial));

      res = LLVMBuildMul(bld->builder, expipart, expfpart, "");
   }

   if(p_exp2_int_part)
      *p_exp2_int_part = expipart;

   if(p_frac_part)
      *p_frac_part = fpart;

   if(p_exp2)
      *p_exp2 = res;
}
+
+
+LLVMValueRef
+lp_build_exp2(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ LLVMValueRef res;
+ lp_build_exp2_approx(bld, x, NULL, NULL, &res);
+ return res;
+}
+
+
/**
 * Minimax polynomial fit of log2(x)/(x - 1), for x in range [1, 2[
 * These coefficients can be generated with
 * http://www.boost.org/doc/libs/1_36_0/libs/math/doc/sf_and_dist/html/math_toolkit/toolkit/internals2/minimax.html
 * Coefficients are in ascending power order; the degree used is selected
 * at compile time via LOG_POLY_DEGREE.
 */
const double lp_build_log2_polynomial[] = {
#if LOG_POLY_DEGREE == 6
   3.11578814719469302614, -3.32419399085241980044, 2.59883907202499966007, -1.23152682416275988241, 0.318212422185251071475, -0.0344359067839062357313
#elif LOG_POLY_DEGREE == 5
   2.8882704548164776201, -2.52074962577807006663, 1.48116647521213171641, -0.465725644288844778798, 0.0596515482674574969533
#elif LOG_POLY_DEGREE == 4
   2.61761038894603480148, -1.75647175389045657003, 0.688243882994381274313, -0.107254423828329604454
#elif LOG_POLY_DEGREE == 3
   2.28330284476918490682, -1.04913055217340124191, 0.204446009836232697516
#else
#error
#endif
};
+
+
+/**
+ * See http://www.devmaster.net/forums/showthread.php?p=43580
+ */
+void
+lp_build_log2_approx(struct lp_build_context *bld,
+ LLVMValueRef x,
+ LLVMValueRef *p_exp,
+ LLVMValueRef *p_floor_log2,
+ LLVMValueRef *p_log2)
+{
+ const struct lp_type type = bld->type;
+ LLVMTypeRef vec_type = lp_build_vec_type(type);
+ LLVMTypeRef int_vec_type = lp_build_int_vec_type(type);
+
+ LLVMValueRef expmask = lp_build_int_const_scalar(type, 0x7f800000);
+ LLVMValueRef mantmask = lp_build_int_const_scalar(type, 0x007fffff);
+ LLVMValueRef one = LLVMConstBitCast(bld->one, int_vec_type);
+
+ LLVMValueRef i = NULL;
+ LLVMValueRef exp = NULL;
+ LLVMValueRef mant = NULL;
+ LLVMValueRef logexp = NULL;
+ LLVMValueRef logmant = NULL;
+ LLVMValueRef res = NULL;
+
+ if(p_exp || p_floor_log2 || p_log2) {
+ /* TODO: optimize the constant case */
+ if(LLVMIsConstant(x))
+ debug_printf("%s: inefficient/imprecise constant arithmetic\n",
+ __FUNCTION__);
+
+ assert(type.floating && type.width == 32);
+
+ i = LLVMBuildBitCast(bld->builder, x, int_vec_type, "");
+
+ /* exp = (float) exponent(x) */
+ exp = LLVMBuildAnd(bld->builder, i, expmask, "");
+ }
+
+ if(p_floor_log2 || p_log2) {
+ logexp = LLVMBuildLShr(bld->builder, exp, lp_build_int_const_scalar(type, 23), "");
+ logexp = LLVMBuildSub(bld->builder, logexp, lp_build_int_const_scalar(type, 127), "");
+ logexp = LLVMBuildSIToFP(bld->builder, logexp, vec_type, "");
+ }
+
+ if(p_log2) {
+ /* mant = (float) mantissa(x) */
+ mant = LLVMBuildAnd(bld->builder, i, mantmask, "");
+ mant = LLVMBuildOr(bld->builder, mant, one, "");
+ mant = LLVMBuildSIToFP(bld->builder, mant, vec_type, "");
+
+ logmant = lp_build_polynomial(bld, mant, lp_build_log2_polynomial,
+ Elements(lp_build_log2_polynomial));
+
+ /* This effectively increases the polynomial degree by one, but ensures that log2(1) == 0*/
+ logmant = LLVMBuildMul(bld->builder, logmant, LLVMBuildMul(bld->builder, mant, bld->one, ""), "");
+
+ res = LLVMBuildAdd(bld->builder, logmant, logexp, "");
+ }
+
+ if(p_exp)
+ *p_exp = exp;
+
+ if(p_floor_log2)
+ *p_floor_log2 = logexp;
+
+ if(p_log2)
+ *p_log2 = res;
+}
+
+
+LLVMValueRef
+lp_build_log2(struct lp_build_context *bld,
+ LLVMValueRef x)
+{
+ LLVMValueRef res;
+ lp_build_log2_approx(bld, x, NULL, NULL, &res);
+ return res;
}