* SOFTWARE.
*/
+/* #define NV50_TGSI2NC_DEBUG */
+
/* XXX: need to clean this up so we get the typecasting right more naturally */
#include <unistd.h>
struct nv_basic_block *brkt_bb[BLD_MAX_LOOP_NESTING];
int loop_lvl;
+ ubyte out_kind; /* CFG_EDGE_FORWARD, or FAKE in case of BREAK/CONT */
+
struct bld_value_stack tvs[BLD_MAX_TEMPS][4]; /* TGSI_FILE_TEMPORARY */
struct bld_value_stack avs[BLD_MAX_ADDRS][4]; /* TGSI_FILE_ADDRESS */
struct bld_value_stack pvs[BLD_MAX_PREDS][4]; /* TGSI_FILE_PREDICATE */
return;
}
for (i = 0; i < b->num_in; ++i)
- if (b->in_kind[i] != CFG_EDGE_BACK)
+ if (!IS_WALL_EDGE(b->in_kind[i]))
fetch_by_bb(stack, vals, n, b->in[i]);
}
return phi->def[0];
}
+/* Insert a phi function in the loop header.
+ * For nested loops, we need to insert phi functions in all the outer
+ * loop headers if they don't have one yet.
+ *
+ * @def: redefinition from inside loop, or NULL if to be replaced later
+ */
static struct nv_value *
bld_loop_phi(struct bld_context *bld, struct bld_value_stack *stack,
struct nv_value *def)
{
- struct nv_basic_block *bb = bld->pc->current_block;
struct nv_instruction *phi;
- struct nv_value *val;
+ struct nv_basic_block *bb = bld->pc->current_block;
+ struct nv_value *val = NULL;
- val = bld_phi(bld, bld->pc->current_block, stack);
+ if (bld->loop_lvl > 1) {
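+      /* also get a phi in the enclosing loop header if there is none yet */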
+ --bld->loop_lvl;
+ if (!((stack->loop_def | stack->loop_use) & (1 << bld->loop_lvl)))
+ val = bld_loop_phi(bld, stack, NULL);
+ ++bld->loop_lvl;
+ }
+
+ if (!val)
+ val = bld_phi(bld, bld->pc->current_block, stack); /* old definition */
if (!val) {
bld->pc->current_block = bld->loop_bb[bld->loop_lvl - 1]->in[0];
-
val = bld_undef(bld, bld_stack_file(bld, stack));
}
static void
bld_loop_end(struct bld_context *bld, struct nv_basic_block *bb)
{
+ struct nv_basic_block *save = bld->pc->current_block;
struct nv_instruction *phi, *next;
struct nv_value *val;
struct bld_value_stack *stk;
- int s;
+ int i, s, n;
for (phi = bb->phi; phi && phi->opcode == NV_OP_PHI; phi = next) {
next = phi->next;
stk = (struct bld_value_stack *)phi->target;
phi->target = NULL;
- val = bld_fetch_global(bld, stk);
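+      /* fetch the current definition from each back-edge predecessor and
+       * add it as a phi source if it is not referenced yet */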
+ for (s = 1, n = 0; n < bb->num_in; ++n) {
+ if (bb->in_kind[n] != CFG_EDGE_BACK)
+ continue;
- nv_reference(bld->pc, &phi->src[1], val);
+ assert(s < 4);
+ bld->pc->current_block = bb->in[n];
+ val = bld_fetch_global(bld, stk);
+
+ for (i = 0; i < 4; ++i)
+ if (phi->src[i] && phi->src[i]->value == val)
+ break;
+ if (i == 4)
+ nv_reference(bld->pc, &phi->src[s++], val);
+ }
+ bld->pc->current_block = save;
- s = -1;
if (phi->src[0]->value == phi->def[0] ||
phi->src[0]->value == phi->src[1]->value)
s = 1;
else
if (phi->src[1]->value == phi->def[0])
s = 0;
+ else
+ continue;
if (s >= 0) {
+ /* eliminate the phi */
bld_vals_del_val(stk, phi->def[0]);
++bld->pc->pass_seq;
case TGSI_OPCODE_CEIL: return NV_OP_CEIL;
case TGSI_OPCODE_FLR: return NV_OP_FLOOR;
case TGSI_OPCODE_TRUNC: return NV_OP_TRUNC;
+ case TGSI_OPCODE_COS: return NV_OP_COS;
+ case TGSI_OPCODE_SIN: return NV_OP_SIN;
case TGSI_OPCODE_DDX: return NV_OP_DFDX;
case TGSI_OPCODE_DDY: return NV_OP_DFDY;
case TGSI_OPCODE_F2I:
for (i = 0; i < 128; ++i)
bld->saved_inputs[i] = NULL;
+
+ bld->out_kind = CFG_EDGE_FORWARD;
}
static struct nv_value *
bld_interpolate(struct bld_context *bld, unsigned mode, struct nv_value *val)
{
+ if (val->reg.id == 255) {
+ /* gl_FrontFacing: 0/~0 to -1.0/+1.0 */
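+      /* (0 << 31) ^ 0xBF800000 = -1.0f, (~0 << 31) ^ 0xBF800000 = +1.0f */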
+ val = bld_insn_1(bld, NV_OP_LINTERP, val);
+ val = bld_insn_2(bld, NV_OP_SHL, val, bld_imm_u32(bld, 31));
+ val->insn->src[0]->typecast = NV_TYPE_U32;
+ val = bld_insn_2(bld, NV_OP_XOR, val, bld_imm_f32(bld, -1.0f));
+ val->insn->src[0]->typecast = NV_TYPE_U32;
+ } else
if (mode & (NV50_INTERP_LINEAR | NV50_INTERP_FLAT))
val = bld_insn_1(bld, NV_OP_LINTERP, val);
else
case TGSI_FILE_IMMEDIATE:
assert(idx < bld->ti->immd32_nr);
res = bld_load_imm_u32(bld, bld->ti->immd32[idx * 4 + swz]);
- res->reg.type = type;
+
+ switch (bld->ti->immd32_ty[idx]) {
+ case TGSI_IMM_FLOAT32: res->reg.type = NV_TYPE_F32; break;
+ case TGSI_IMM_UINT32: res->reg.type = NV_TYPE_U32; break;
+ case TGSI_IMM_INT32: res->reg.type = NV_TYPE_S32; break;
+ default:
+ res->reg.type = type;
+ break;
+ }
break;
case TGSI_FILE_INPUT:
res = bld_saved_input(bld, idx, swz);
} else {
assert(src->Dimension.Dimension == 0);
res = bld_insn_1(bld, NV_OP_LDA, res);
+ assert(res->reg.type == type);
}
- assert(res->reg.type == type);
-
bld->saved_inputs[bld->ti->input_map[idx][swz]] = res;
break;
case TGSI_FILE_TEMPORARY:
abort();
break;
}
- if (!res) {
- debug_printf("WARNING: undefined source value in TGSI instruction\n");
- return bld_load_imm_u32(bld, 0);
- }
+ if (!res)
+ return bld_undef(bld, NV_FILE_GPR);
switch (tgsi_util_get_full_src_register_sign_mode(src, chan)) {
case TGSI_UTIL_SIGN_KEEP:
static void
load_proj_tex_coords(struct bld_context *bld,
- struct nv_value *t[4], int dim,
- const struct tgsi_full_instruction *insn)
+ struct nv_value *t[4], int dim,
+ const struct tgsi_full_instruction *insn)
{
int c, mask = 0;
}
}
+/* For a quad of threads (top-left, top-right, bottom-left and bottom-right
+ * pixel), perform a (potentially different) operation on each lane, taking
+ * src0 from one specific lane.
+ */
+#define QOP_ADD 0
+#define QOP_SUBR 1
+#define QOP_SUB 2
+#define QOP_MOV1 3
+
+#define QOP(a, b, c, d) \
+ ((QOP_##a << 0) | (QOP_##b << 2) | (QOP_##c << 4) | (QOP_##d << 6))
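+
+/* e.g. QOP(SUBR, SUBR, SUBR, SUBR) makes every lane perform a reversed
+ * subtract (presumably src1 - src0), with src0 taken from the lane
+ * selected by bld_quadop's lane argument.
+ */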
+
+static INLINE struct nv_value *
+bld_quadop(struct bld_context *bld, ubyte qop, struct nv_value *src0, int lane,
+ struct nv_value *src1, boolean wp)
+{
+ struct nv_value *val = bld_insn_2(bld, NV_OP_QUADOP, src0, src1);
+ val->insn->lanes = lane;
+ val->insn->quadop = qop;
+ if (wp) {
+ val->insn->flags_def = new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16);
+ val->insn->flags_def->insn = val->insn;
+ }
+ return val;
+}
+
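+/* Predicated MOV: the copy only takes effect if condition cc holds in cr. */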
+static INLINE struct nv_value *
+bld_cmov(struct bld_context *bld,
+ struct nv_value *src, ubyte cc, struct nv_value *cr)
+{
+ src = bld_insn_1(bld, NV_OP_MOV, src);
+
+ src->insn->cc = cc;
+ src->insn->flags_src = new_ref(bld->pc, cr);
+
+ return src;
+}
+
+static struct nv_instruction *
+emit_tex(struct bld_context *bld, uint opcode,
+ struct nv_value *dst[4], struct nv_value *t_in[4],
+ int argc, int tic, int tsc, int cube)
+{
+ struct nv_value *t[4];
+ struct nv_instruction *nvi;
+ int c;
+
+ /* the inputs to a tex instruction must be separate values */
+ for (c = 0; c < argc; ++c) {
+ t[c] = bld_insn_1(bld, NV_OP_MOV, t_in[c]);
+ t[c]->reg.type = NV_TYPE_F32;
+ t[c]->insn->fixed = 1;
+ }
+
+ nvi = new_instruction(bld->pc, opcode);
+
+ for (c = 0; c < 4; ++c)
+ dst[c] = bld_def(nvi, c, new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32));
+
+ for (c = 0; c < argc; ++c)
+ nvi->src[c] = new_ref(bld->pc, t[c]);
+
+ nvi->tex_t = tic;
+ nvi->tex_s = tsc;
+ nvi->tex_mask = 0xf;
+ nvi->tex_cube = cube;
+ nvi->tex_live = 0;
+ nvi->tex_argc = argc;
+
+ return nvi;
+}
+
+static void
+bld_texlod_sequence(struct bld_context *bld,
+ struct nv_value *dst[4], struct nv_value *t[4], int arg,
+ int tic, int tsc, int cube)
+{
+   /* TODO: use a per-group sequence like bld_texbias_sequence, since an
+    * explicit LOD may also diverge within a quad */
+   emit_tex(bld, NV_OP_TXL, dst, t, arg, tic, tsc, cube);
+}
+
+
+/* The lanes of a quad are grouped by the bit they have set in the
+ * condition register, which is selected according to their (possibly
+ * different) bias values.
+ * Move the TEX input values into a new register set for each group and
+ * execute TEX only for that specific group.
+ * We always need to use 4 new registers for the inputs/outputs because
+ * the implicitly calculated derivatives must be correct.
+ */
+static void
+bld_texbias_sequence(struct bld_context *bld,
+ struct nv_value *dst[4], struct nv_value *t[4], int arg,
+ int tic, int tsc, int cube)
+{
+ struct nv_instruction *sel, *tex;
+ struct nv_value *bit[4], *cr[4], *res[4][4], *val;
+ int l, c;
+
+ const ubyte cc[4] = { NV_CC_EQ, NV_CC_S, NV_CC_C, NV_CC_O };
+
+ for (l = 0; l < 4; ++l) {
+ bit[l] = bld_load_imm_u32(bld, 1 << l);
+
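+      /* subtract lane l's bias from each thread's bias: the flags signal
+       * CC_EQ exactly for the threads that belong to lane l's group */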
+ val = bld_quadop(bld, QOP(SUBR, SUBR, SUBR, SUBR),
+ t[arg - 1], l, t[arg - 1], TRUE);
+
+ cr[l] = bld_cmov(bld, bit[l], NV_CC_EQ, val->insn->flags_def);
+
+ cr[l]->reg.file = NV_FILE_FLAGS;
+ cr[l]->reg.type = NV_TYPE_U16;
+ }
+
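+   /* merge the per-group bits into a single condition register */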
+ sel = new_instruction(bld->pc, NV_OP_SELECT);
+
+ for (l = 0; l < 4; ++l)
+ sel->src[l] = new_ref(bld->pc, cr[l]);
+
+ bld_def(sel, 0, new_value(bld->pc, NV_FILE_FLAGS, NV_TYPE_U16));
+
+ for (l = 0; l < 4; ++l) {
+ tex = emit_tex(bld, NV_OP_TXB, dst, t, arg, tic, tsc, cube);
+
+ tex->cc = cc[l];
+ tex->flags_src = new_ref(bld->pc, sel->def[0]);
+
+ for (c = 0; c < 4; ++c)
+ res[l][c] = tex->def[c];
+ }
+
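+   /* predicate each group's results so that the final SELECTs can pick
+    * the values of the TEX that actually executed for each group */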
+ for (l = 0; l < 4; ++l)
+ for (c = 0; c < 4; ++c)
+ res[l][c] = bld_cmov(bld, res[l][c], cc[l], sel->def[0]);
+
+ for (c = 0; c < 4; ++c) {
+ sel = new_instruction(bld->pc, NV_OP_SELECT);
+
+ for (l = 0; l < 4; ++l)
+ sel->src[l] = new_ref(bld->pc, res[l][c]);
+
+ bld_def(sel, 0, (dst[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32)));
+ }
+}
+
+static boolean
+bld_is_constant(struct nv_value *val)
+{
+ if (val->reg.file == NV_FILE_IMM)
+ return TRUE;
+ return val->insn && nvcg_find_constant(val->insn->src[0]);
+}
+
static void
bld_tex(struct bld_context *bld, struct nv_value *dst0[4],
const struct tgsi_full_instruction *insn)
{
- struct nv_value *t[4];
- struct nv_instruction *nvi;
+ struct nv_value *t[4], *s[3];
uint opcode = translate_opcode(insn->Instruction.Opcode);
int arg, dim, c;
+ const int tic = insn->Src[1].Register.Index;
+ const int tsc = 0;
+ const int cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;
get_tex_dim(insn, &dim, &arg);
- if (insn->Texture.Texture == TGSI_TEXTURE_CUBE) {
- }
- // else
- if (insn->Instruction.Opcode == TGSI_OPCODE_TXP) {
+ if (!cube && insn->Instruction.Opcode == TGSI_OPCODE_TXP)
load_proj_tex_coords(bld, t, dim, insn);
- } else
+ else
for (c = 0; c < dim; ++c)
t[c] = emit_fetch(bld, insn, 0, c);
+ if (cube) {
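+      /* project the coordinates onto the cube face:
+       * t[c] /= max(|x|, |y|, |z|) */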
+ assert(dim >= 3);
+ for (c = 0; c < 3; ++c)
+ s[c] = bld_insn_1(bld, NV_OP_ABS, t[c]);
+
+ s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[1]);
+ s[0] = bld_insn_2(bld, NV_OP_MAX, s[0], s[2]);
+ s[0] = bld_insn_1(bld, NV_OP_RCP, s[0]);
+
+ for (c = 0; c < 3; ++c)
+ t[c] = bld_insn_2(bld, NV_OP_MUL, t[c], s[0]);
+ }
+
if (arg != dim)
t[dim] = emit_fetch(bld, insn, 0, 2);
- if (insn->Instruction.Opcode == TGSI_OPCODE_TXB ||
- insn->Instruction.Opcode == TGSI_OPCODE_TXL) {
+ if (opcode == NV_OP_TXB || opcode == NV_OP_TXL) {
t[arg++] = emit_fetch(bld, insn, 0, 3);
- }
- for (c = 0; c < arg; ++c) {
- t[c] = bld_insn_1(bld, NV_OP_MOV, t[c]);
- t[c]->reg.type = NV_TYPE_F32;
+ if ((bld->ti->p->type == PIPE_SHADER_FRAGMENT) &&
+ !bld_is_constant(t[arg - 1])) {
+ if (opcode == NV_OP_TXB)
+ bld_texbias_sequence(bld, dst0, t, arg, tic, tsc, cube);
+ else
+ bld_texlod_sequence(bld, dst0, t, arg, tic, tsc, cube);
+ return;
+ }
}
- nvi = new_instruction(bld->pc, opcode);
+ emit_tex(bld, opcode, dst0, t, arg, tic, tsc, cube);
+}
- for (c = 0; c < 4; ++c) {
- nvi->def[c] = dst0[c] = new_value(bld->pc, NV_FILE_GPR, NV_TYPE_F32);
- nvi->def[c]->insn = nvi;
- }
- for (c = 0; c < arg; ++c)
- nvi->src[c] = new_ref(bld->pc, t[c]);
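+/* Dot product of the first n components: one MUL followed by n-1 MADs. */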
+static INLINE struct nv_value *
+bld_dot(struct bld_context *bld, const struct tgsi_full_instruction *insn,
+ int n)
+{
+ struct nv_value *dotp, *src0, *src1;
+ int c;
- nvi->tex_t = insn->Src[1].Register.Index;
- nvi->tex_s = 0;
- nvi->tex_mask = 0xf;
- nvi->tex_cube = (insn->Texture.Texture == TGSI_TEXTURE_CUBE) ? 1 : 0;
- nvi->tex_live = 0;
- nvi->tex_argc = arg;
+ src0 = emit_fetch(bld, insn, 0, 0);
+ src1 = emit_fetch(bld, insn, 1, 0);
+ dotp = bld_insn_2(bld, NV_OP_MUL, src0, src1);
+
+ for (c = 1; c < n; ++c) {
+ src0 = emit_fetch(bld, insn, 0, c);
+ src1 = emit_fetch(bld, insn, 1, c);
+ dotp = bld_insn_3(bld, NV_OP_MAD, src0, src1, dotp);
+ }
+ return dotp;
}
#define FOR_EACH_DST0_ENABLED_CHANNEL(chan, inst) \
int c;
uint opcode = translate_opcode(insn->Instruction.Opcode);
- tgsi_dump_instruction(insn, 1);
+#ifdef NV50_TGSI2NC_DEBUG
+ debug_printf("bld_instruction:"); tgsi_dump_instruction(insn, 1);
+#endif
switch (insn->Instruction.Opcode) {
case TGSI_OPCODE_ADD:
dst0[c] = bld_insn_2(bld, opcode, src0, src1);
}
break;
+ case TGSI_OPCODE_ARL:
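+      /* dst = floor(src) << 4: the address register presumably holds a
+       * byte offset, and a vec4 temporary occupies 16 bytes */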
+ src1 = bld_imm_u32(bld, 4);
+ FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
+ src0 = emit_fetch(bld, insn, 0, c);
+ (temp = bld_insn_1(bld, NV_OP_FLOOR, src0))->reg.type = NV_TYPE_S32;
+ dst0[c] = bld_insn_2(bld, NV_OP_SHL, temp, src1);
+ }
+ break;
case TGSI_OPCODE_CMP:
FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
src0 = emit_fetch(bld, insn, 0, c);
}
break;
case TGSI_OPCODE_COS:
+ case TGSI_OPCODE_SIN:
src0 = emit_fetch(bld, insn, 0, 0);
temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
if (insn->Dst[0].Register.WriteMask & 7)
- temp = bld_insn_1(bld, NV_OP_COS, temp);
+ temp = bld_insn_1(bld, opcode, temp);
for (c = 0; c < 3; ++c)
if (insn->Dst[0].Register.WriteMask & (1 << c))
dst0[c] = temp;
if (!(insn->Dst[0].Register.WriteMask & (1 << 3)))
break;
- /* XXX: if src0.x is src0.w, don't emit new insns */
src0 = emit_fetch(bld, insn, 0, 3);
temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
- dst0[3] = bld_insn_1(bld, NV_OP_COS, temp);
+ dst0[3] = bld_insn_1(bld, opcode, temp);
+ break;
+ case TGSI_OPCODE_DP2:
+ temp = bld_dot(bld, insn, 2);
+ FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
+ dst0[c] = temp;
break;
case TGSI_OPCODE_DP3:
- src0 = emit_fetch(bld, insn, 0, 0);
- src1 = emit_fetch(bld, insn, 1, 0);
- temp = bld_insn_2(bld, NV_OP_MUL, src0, src1);
- for (c = 1; c < 3; ++c) {
- src0 = emit_fetch(bld, insn, 0, c);
- src1 = emit_fetch(bld, insn, 1, c);
- temp = bld_insn_3(bld, NV_OP_MAD, src0, src1, temp);
- }
+ temp = bld_dot(bld, insn, 3);
FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
dst0[c] = temp;
break;
case TGSI_OPCODE_DP4:
- src0 = emit_fetch(bld, insn, 0, 0);
- src1 = emit_fetch(bld, insn, 1, 0);
- temp = bld_insn_2(bld, NV_OP_MUL, src0, src1);
- for (c = 1; c < 4; ++c) {
- src0 = emit_fetch(bld, insn, 0, c);
- src1 = emit_fetch(bld, insn, 1, c);
- temp = bld_insn_3(bld, NV_OP_MAD, src0, src1, temp);
- }
+ temp = bld_dot(bld, insn, 4);
FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
dst0[c] = temp;
break;
+ case TGSI_OPCODE_DPH:
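+      /* dst = dot3(src0, src1) + src1.w, broadcast to all channels */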
+ src0 = bld_dot(bld, insn, 3);
+ src1 = emit_fetch(bld, insn, 1, 3);
+ temp = bld_insn_2(bld, NV_OP_ADD, src0, src1);
+ FOR_EACH_DST0_ENABLED_CHANNEL(c, insn)
+ dst0[c] = temp;
+ break;
+ case TGSI_OPCODE_DST:
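+      /* dst = (1.0, src0.y * src1.y, src0.z, src1.w) */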
+ if (insn->Dst[0].Register.WriteMask & 1)
+ dst0[0] = bld_imm_f32(bld, 1.0f);
+ if (insn->Dst[0].Register.WriteMask & 2) {
+ src0 = emit_fetch(bld, insn, 0, 1);
+ src1 = emit_fetch(bld, insn, 1, 1);
+ dst0[1] = bld_insn_2(bld, NV_OP_MUL, src0, src1);
+ }
+ if (insn->Dst[0].Register.WriteMask & 4)
+ dst0[2] = emit_fetch(bld, insn, 0, 2);
+ if (insn->Dst[0].Register.WriteMask & 8)
+ dst0[3] = emit_fetch(bld, insn, 1, 3);
+ break;
case TGSI_OPCODE_EX2:
src0 = emit_fetch(bld, insn, 0, 0);
temp = bld_insn_1(bld, NV_OP_PREEX2, src0);
bld_kil(bld, src0);
}
break;
+ case TGSI_OPCODE_KILP:
+ (new_instruction(bld->pc, NV_OP_KIL))->fixed = 1;
+ break;
case TGSI_OPCODE_IF:
{
struct nv_basic_block *b = new_basic_block(bld->pc);
src1 = bld_predicate(bld, emit_fetch(bld, insn, 0, 0), TRUE);
- bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, FALSE);
+ bld_flow(bld, NV_OP_BRA, NV_CC_EQ, src1, NULL, (bld->cond_lvl == 0));
++bld->cond_lvl;
bld_new_block(bld, b);
struct nv_basic_block *b = new_basic_block(bld->pc);
--bld->cond_lvl;
- nvbb_attach_block(bld->pc->current_block, b, CFG_EDGE_FORWARD);
+ nvbb_attach_block(bld->pc->current_block, b, bld->out_kind);
nvbb_attach_block(bld->cond_bb[bld->cond_lvl], b, CFG_EDGE_FORWARD);
bld->cond_bb[bld->cond_lvl]->exit->target = b;
- if (0 && bld->join_bb[bld->cond_lvl]) {
- bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
+ bld_new_block(bld, b);
- new_instruction(bld->pc, NV_OP_NOP)->is_join = TRUE;
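+   /* a JOIN is emitted only when leaving the outermost conditional */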
+ if (!bld->cond_lvl && bld->join_bb[bld->cond_lvl]) {
+ bld->join_bb[bld->cond_lvl]->exit->prev->target = b;
+ new_instruction(bld->pc, NV_OP_JOIN)->is_join = TRUE;
}
-
- bld_new_block(bld, b);
}
break;
case TGSI_OPCODE_BGNLOOP:
bld_flow(bld, NV_OP_BREAK, NV_CC_TR, NULL, bb, FALSE);
- /* XXX: don't do this for redundant BRKs */
- nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
+ if (bld->out_kind == CFG_EDGE_FORWARD) /* else we already had BRK/CONT */
+ nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_LOOP_LEAVE);
+
+ bld->out_kind = CFG_EDGE_FAKE;
}
break;
case TGSI_OPCODE_CONT:
bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);
nvbb_attach_block(bld->pc->current_block, bb, CFG_EDGE_BACK);
+
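+      /* the paths of the enclosing IF presumably cannot join anymore
+       * after a CONT, so delete its pending JOIN instruction */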
+ if ((bb = bld->join_bb[bld->cond_lvl - 1])) {
+ bld->join_bb[bld->cond_lvl - 1] = NULL;
+ nv_nvi_delete(bb->exit->prev);
+ }
+ bld->out_kind = CFG_EDGE_FAKE;
}
break;
case TGSI_OPCODE_ENDLOOP:
{
- struct nv_basic_block *bb = bld->loop_bb[--bld->loop_lvl];
+ struct nv_basic_block *bb = bld->loop_bb[bld->loop_lvl - 1];
bld_flow(bld, NV_OP_BRA, NV_CC_TR, NULL, bb, FALSE);
bld_loop_end(bld, bb); /* replace loop-side operand of the phis */
- bld_new_block(bld, bld->brkt_bb[bld->loop_lvl]);
+ bld_new_block(bld, bld->brkt_bb[--bld->loop_lvl]);
}
break;
case TGSI_OPCODE_ABS:
dst0[c]->reg.type = NV_TYPE_F32;
}
break;
+ case TGSI_OPCODE_SCS:
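+      /* dst = (cos(src.x), sin(src.x), 0.0, 1.0) */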
+ if (insn->Dst[0].Register.WriteMask & 0x3) {
+ src0 = emit_fetch(bld, insn, 0, 0);
+ temp = bld_insn_1(bld, NV_OP_PRESIN, src0);
+ if (insn->Dst[0].Register.WriteMask & 0x1)
+ dst0[0] = bld_insn_1(bld, NV_OP_COS, temp);
+ if (insn->Dst[0].Register.WriteMask & 0x2)
+ dst0[1] = bld_insn_1(bld, NV_OP_SIN, temp);
+ }
+ if (insn->Dst[0].Register.WriteMask & 0x4)
+ dst0[2] = bld_imm_f32(bld, 0.0f);
+ if (insn->Dst[0].Register.WriteMask & 0x8)
+ dst0[3] = bld_imm_f32(bld, 1.0f);
+ break;
+ case TGSI_OPCODE_SSG:
+ FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
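+         /* temp = copysign(1.0f, src0); the predicated XOR r,r below
+          * zeroes the result in place iff src0 == 0 (this presumably
+          * relies on RA coalescing the XOR's def with its sources) */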
+ src0 = emit_fetch(bld, insn, 0, c);
+ src1 = bld_predicate(bld, src0, FALSE);
+ temp = bld_insn_2(bld, NV_OP_AND, src0, bld_imm_u32(bld, 0x80000000));
+ temp = bld_insn_2(bld, NV_OP_OR, temp, bld_imm_f32(bld, 1.0f));
+ dst0[c] = bld_insn_2(bld, NV_OP_XOR, temp, temp);
+ dst0[c]->insn->cc = NV_CC_EQ;
+ nv_reference(bld->pc, &dst0[c]->insn->flags_src, src1);
+ }
+ break;
case TGSI_OPCODE_SUB:
FOR_EACH_DST0_ENABLED_CHANNEL(c, insn) {
src0 = emit_fetch(bld, insn, 0, c);
dst0[c]->insn->src[2]->mod ^= NV_MOD_NEG;
}
break;
+ case TGSI_OPCODE_RET:
+ (new_instruction(bld->pc, NV_OP_RET))->fixed = 1;
+ break;
case TGSI_OPCODE_END:
if (bld->ti->p->type == PIPE_SHADER_FRAGMENT)
bld_export_outputs(bld);
break;
default:
- NOUVEAU_ERR("nv_bld: unhandled opcode %u\n", insn->Instruction.Opcode);
+ NOUVEAU_ERR("unhandled opcode %u\n", insn->Instruction.Opcode);
abort();
break;
}
{
struct nv_instruction *nvi;
- for (nvi = b->entry; nvi; nvi = nvi->next) {
+ for (nvi = b->phi ? b->phi : b->entry; nvi; nvi = nvi->next) {
int s;
for (s = 0; s < 5; ++s) {
if (!nvi->src[s])