*
* Copyright 2011-2012 Advanced Micro Devices, Inc.
* Copyright 2009 VMware, Inc.
- * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007-2008 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
scs_emit /* emit */
};
-/* TGSI_OPCODE_SFL */
-
-static void
-sfl_emit(
- const struct lp_build_tgsi_action * action,
- struct lp_build_tgsi_context * bld_base,
- struct lp_build_emit_data * emit_data)
-{
- emit_data->output[emit_data->chan] = bld_base->base.zero;
-}
-
-/* TGSI_OPCODE_STR */
-
+/* TGSI_OPCODE_SUB */
static void
-str_emit(
+sub_emit(
const struct lp_build_tgsi_action * action,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = bld_base->base.one;
+ emit_data->output[emit_data->chan] =
+ LLVMBuildFSub(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ emit_data->args[1], "");
}
-/* TGSI_OPCODE_SUB */
+/* TGSI_OPCODE_F2U */
static void
-sub_emit(
+f2u_emit(
const struct lp_build_tgsi_action * action,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = LLVMBuildFSub(
- bld_base->base.gallivm->builder,
- emit_data->args[0],
- emit_data->args[1], "");
+ emit_data->output[emit_data->chan] =
+ LLVMBuildFPToUI(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->base.int_vec_type, "");
}
/* TGSI_OPCODE_U2F */
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = LLVMBuildUIToFP(bld_base->base.gallivm->builder,
- emit_data->args[0],
- bld_base->base.vec_type, "");
+ emit_data->output[emit_data->chan] =
+ LLVMBuildUIToFP(bld_base->base.gallivm->builder,
+ emit_data->args[0],
+ bld_base->base.vec_type, "");
}
static void
emit_data->args[0], emit_data->args[1]);
}
+/* TGSI_OPCODE_IMUL_HI */
+static void
+imul_hi_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ struct lp_build_context *int_bld = &bld_base->int_bld;
+ struct lp_type type = int_bld->type;
+ LLVMValueRef src0, src1;
+ LLVMValueRef dst64;
+ LLVMTypeRef typeRef;
+
+ assert(type.width == 32);
+ type.width = 64;
+ typeRef = lp_build_vec_type(bld_base->base.gallivm, type);
+ src0 = LLVMBuildSExt(builder, emit_data->args[0], typeRef, "");
+ src1 = LLVMBuildSExt(builder, emit_data->args[1], typeRef, "");
+ dst64 = LLVMBuildMul(builder, src0, src1, "");
+ dst64 = LLVMBuildAShr(
+ builder, dst64,
+ lp_build_const_vec(bld_base->base.gallivm, type, 32), "");
+ type.width = 32;
+ typeRef = lp_build_vec_type(bld_base->base.gallivm, type);
+ emit_data->output[emit_data->chan] =
+ LLVMBuildTrunc(builder, dst64, typeRef, "");
+}
+
+/* TGSI_OPCODE_UMUL_HI */
+static void
+umul_hi_emit(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ struct lp_build_context *uint_bld = &bld_base->uint_bld;
+ struct lp_type type = uint_bld->type;
+ LLVMValueRef src0, src1;
+ LLVMValueRef dst64;
+ LLVMTypeRef typeRef;
+
+ assert(type.width == 32);
+ type.width = 64;
+ typeRef = lp_build_vec_type(bld_base->base.gallivm, type);
+ src0 = LLVMBuildZExt(builder, emit_data->args[0], typeRef, "");
+ src1 = LLVMBuildZExt(builder, emit_data->args[1], typeRef, "");
+ dst64 = LLVMBuildMul(builder, src0, src1, "");
+ dst64 = LLVMBuildLShr(
+ builder, dst64,
+ lp_build_const_vec(bld_base->base.gallivm, type, 32), "");
+ type.width = 32;
+ typeRef = lp_build_vec_type(bld_base->base.gallivm, type);
+ emit_data->output[emit_data->chan] =
+ LLVMBuildTrunc(builder, dst64, typeRef, "");
+}
+
/* TGSI_OPCODE_MAX */
static void fmax_emit(
const struct lp_build_tgsi_action * action,
bld_base->op_actions[TGSI_OPCODE_MUL].emit = mul_emit;
bld_base->op_actions[TGSI_OPCODE_DIV].emit = fdiv_emit;
bld_base->op_actions[TGSI_OPCODE_RCP].emit = rcp_emit;
- bld_base->op_actions[TGSI_OPCODE_SFL].emit = sfl_emit;
- bld_base->op_actions[TGSI_OPCODE_STR].emit = str_emit;
bld_base->op_actions[TGSI_OPCODE_SUB].emit = sub_emit;
bld_base->op_actions[TGSI_OPCODE_UARL].emit = mov_emit;
+ bld_base->op_actions[TGSI_OPCODE_F2U].emit = f2u_emit;
bld_base->op_actions[TGSI_OPCODE_U2F].emit = u2f_emit;
bld_base->op_actions[TGSI_OPCODE_UMAD].emit = umad_emit;
bld_base->op_actions[TGSI_OPCODE_UMUL].emit = umul_emit;
+ bld_base->op_actions[TGSI_OPCODE_IMUL_HI].emit = imul_hi_emit;
+ bld_base->op_actions[TGSI_OPCODE_UMUL_HI].emit = umul_hi_emit;
bld_base->op_actions[TGSI_OPCODE_MAX].emit = fmax_emit;
bld_base->op_actions[TGSI_OPCODE_MIN].emit = fmin_emit;
cond, emit_data->args[1], emit_data->args[2]);
}
-
-/* TGSI_OPCODE_CND (CPU Only) */
-static void
-cnd_emit_cpu(
- const struct lp_build_tgsi_action * action,
- struct lp_build_tgsi_context * bld_base,
- struct lp_build_emit_data * emit_data)
-{
- LLVMValueRef half, tmp;
- half = lp_build_const_vec(bld_base->base.gallivm, bld_base->base.type, 0.5);
- tmp = lp_build_cmp(&bld_base->base, PIPE_FUNC_GREATER,
- emit_data->args[2], half);
- emit_data->output[emit_data->chan] = lp_build_select(&bld_base->base,
- tmp,
- emit_data->args[0],
- emit_data->args[1]);
-}
-
/* TGSI_OPCODE_COS (CPU Only) */
static void
cos_emit_cpu(
emit_data->args[0]);
}
-/* TGSI_OPCODE_EXP (CPU Only) */
+/* TGSI_OPCODE_F2I (CPU Only) */
static void
-exp_emit_cpu(
+f2i_emit_cpu(
const struct lp_build_tgsi_action * action,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- lp_build_exp2_approx(&bld_base->base, emit_data->args[0],
- &emit_data->output[TGSI_CHAN_X],
- &emit_data->output[TGSI_CHAN_Y],
- &emit_data->output[TGSI_CHAN_Z]);
- emit_data->output[TGSI_CHAN_W] = bld_base->base.one;
+ emit_data->output[emit_data->chan] = lp_build_itrunc(&bld_base->base,
+ emit_data->args[0]);
}
-/* TGSI_OPCODE_F2I (CPU Only) */
+/* TGSI_OPCODE_FSET Helper (CPU Only) */
static void
-f2i_emit_cpu(
+fset_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data,
+ unsigned pipe_func)
+{
+ LLVMValueRef cond;
+
+ if (pipe_func != PIPE_FUNC_NOTEQUAL) {
+ cond = lp_build_cmp_ordered(&bld_base->base, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+ }
+ else {
+ cond = lp_build_cmp(&bld_base->base, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+
+ }
+ emit_data->output[emit_data->chan] = cond;
+}
+
+
+/* TGSI_OPCODE_FSEQ (CPU Only) */
+static void
+fseq_emit_cpu(
const struct lp_build_tgsi_action * action,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = lp_build_itrunc(&bld_base->base,
- emit_data->args[0]);
+ fset_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_EQUAL);
}
-/* TGSI_OPCODE_F2U (CPU Only) */
+/* TGSI_OPCODE_ISGE (CPU Only) */
static void
-f2u_emit_cpu(
+fsge_emit_cpu(
const struct lp_build_tgsi_action * action,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- /* FIXME: implement and use lp_build_utrunc() */
- emit_data->output[emit_data->chan] = lp_build_itrunc(&bld_base->base,
- emit_data->args[0]);
+ fset_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_GEQUAL);
+}
+
+/* TGSI_OPCODE_FSLT (CPU Only) */
+static void
+fslt_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ fset_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_LESS);
+}
+
+/* TGSI_OPCODE_FSNE (CPU Only) */
+
+static void
+fsne_emit_cpu(
+ const struct lp_build_tgsi_action * action,
+ struct lp_build_tgsi_context * bld_base,
+ struct lp_build_emit_data * emit_data)
+{
+ fset_emit_cpu(action, bld_base, emit_data, PIPE_FUNC_NOTEQUAL);
}
/* TGSI_OPCODE_FLR (CPU Only) */
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = lp_build_div(&bld_base->int_bld,
- emit_data->args[0], emit_data->args[1]);
+ LLVMBuilderRef builder = bld_base->base.gallivm->builder;
+ LLVMValueRef div_mask = lp_build_cmp(&bld_base->uint_bld,
+ PIPE_FUNC_EQUAL, emit_data->args[1],
+ bld_base->uint_bld.zero);
+ /* We want to make sure that we never divide/mod by zero to not
+ * generate sigfpe. We don't want to crash just because the
+ * shader is doing something weird. */
+ LLVMValueRef divisor = LLVMBuildOr(builder,
+ div_mask,
+ emit_data->args[1], "");
+ LLVMValueRef result = lp_build_div(&bld_base->int_bld,
+ emit_data->args[0], divisor);
+ LLVMValueRef not_div_mask = LLVMBuildNot(builder,
+ div_mask,"");
+ /* idiv by zero doesn't have a guaranteed return value; choose 0 for now. */
+ emit_data->output[emit_data->chan] = LLVMBuildAnd(builder,
+ not_div_mask,
+ result, "");
}
/* TGSI_OPCODE_INEG (CPU Only) */
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = lp_build_shr(&bld_base->int_bld,
- emit_data->args[0], emit_data->args[1]);
+ struct lp_build_context *int_bld = &bld_base->int_bld;
+ LLVMValueRef mask = lp_build_const_vec(int_bld->gallivm, int_bld->type,
+ int_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(int_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shr(int_bld, emit_data->args[0],
+ masked_count);
}
/* TGSI_OPCODE_ISLT (CPU Only) */
struct lp_build_emit_data * emit_data,
unsigned pipe_func)
{
- LLVMValueRef cond = lp_build_cmp(&bld_base->base, pipe_func,
- emit_data->args[0], emit_data->args[1]);
+ LLVMValueRef cond;
+
+ if (pipe_func != PIPE_FUNC_NOTEQUAL) {
+ cond = lp_build_cmp_ordered(&bld_base->base, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+ }
+ else {
+ cond = lp_build_cmp(&bld_base->base, pipe_func,
+ emit_data->args[0], emit_data->args[1]);
+
+ }
emit_data->output[emit_data->chan] = lp_build_select(&bld_base->base,
cond,
bld_base->base.one,
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = lp_build_shl(&bld_base->uint_bld,
- emit_data->args[0], emit_data->args[1]);
+ struct lp_build_context *uint_bld = &bld_base->uint_bld;
+ LLVMValueRef mask = lp_build_const_vec(uint_bld->gallivm, uint_bld->type,
+ uint_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(uint_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shl(uint_bld, emit_data->args[0],
+ masked_count);
}
/* TGSI_OPCODE_SIN (CPU Only) */
LLVMValueRef div_mask = lp_build_cmp(&bld_base->uint_bld,
PIPE_FUNC_EQUAL, emit_data->args[1],
bld_base->uint_bld.zero);
- /* We want to make sure that we never divide/mod by zero to not
- * generate sigfpe. We don't want to crash just because the
+ /* We want to make sure that we never divide/mod by zero to not
+ * generate sigfpe. We don't want to crash just because the
* shader is doing something weird. */
LLVMValueRef divisor = LLVMBuildOr(builder,
div_mask,
emit_data->args[1], "");
LLVMValueRef result = lp_build_div(&bld_base->uint_bld,
emit_data->args[0], divisor);
- /* udiv by zero is guaranteed to return 0xffffffff */
+ /* udiv by zero is guaranteed to return 0xffffffff at least with d3d10 */
emit_data->output[emit_data->chan] = LLVMBuildOr(builder,
div_mask,
result, "");
struct lp_build_tgsi_context * bld_base,
struct lp_build_emit_data * emit_data)
{
- emit_data->output[emit_data->chan] = lp_build_shr(&bld_base->uint_bld,
- emit_data->args[0], emit_data->args[1]);
+ struct lp_build_context *uint_bld = &bld_base->uint_bld;
+ LLVMValueRef mask = lp_build_const_vec(uint_bld->gallivm, uint_bld->type,
+ uint_bld->type.width - 1);
+ LLVMValueRef masked_count = lp_build_and(uint_bld, emit_data->args[1], mask);
+ emit_data->output[emit_data->chan] = lp_build_shr(uint_bld, emit_data->args[0],
+ masked_count);
}
/* TGSI_OPCODE_ISLT (CPU Only) */
bld_base->op_actions[TGSI_OPCODE_ARL].emit = arl_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_ARR].emit = arr_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_CEIL].emit = ceil_emit_cpu;
- bld_base->op_actions[TGSI_OPCODE_CND].emit = cnd_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_COS].emit = cos_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_CMP].emit = cmp_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_DIV].emit = div_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_EX2].emit = ex2_emit_cpu;
- bld_base->op_actions[TGSI_OPCODE_EXP].emit = exp_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_F2I].emit = f2i_emit_cpu;
- bld_base->op_actions[TGSI_OPCODE_F2U].emit = f2u_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_FLR].emit = flr_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_FSEQ].emit = fseq_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_FSGE].emit = fsge_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_FSLT].emit = fslt_emit_cpu;
+ bld_base->op_actions[TGSI_OPCODE_FSNE].emit = fsne_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_I2F].emit = i2f_emit_cpu;
bld_base->op_actions[TGSI_OPCODE_IABS].emit = iabs_emit_cpu;