#include "util/u_math.h"
#include "ir3.h"
-#include "ir3_compiler.h"
+#include "ir3_shader.h"
/*
* Legalize:
*
- * We currently require that scheduling ensures that we have enough nop's
- * in all the right places. The legalize step mostly handles fixing up
- * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nop's
- * into fewer nop's w/ rpt flag.
+ * The legalize pass ensures there are sufficient nop's and sync flags for
+ * correct execution:
+ *
+ * 1) Iteratively determine where sync ((sy)/(ss)) flags are needed,
+ * based on state flowing out of predecessor blocks until there is
+ * no further change. In some cases this requires inserting nop's
+ * (see the example below).
+ * 2) Mark (ei) on the last varying input, and (ul) on the last use of a0.x.
+ * 3) Do final nop scheduling for instruction latency.
+ * 4) Resolve jumps and schedule blocks, marking potential convergence
+ * points with (jp).
*/
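+/* For example (an illustrative sketch, not actual compiler output), the
+ * first read of an SFU result needs (ss), and the first read of a texture
+ * fetch result needs (sy):
+ *
+ *   rcp r0.x, r1.x
+ *   sam (f32)(xyzw)r2.x, r3.z, s#0, t#0
+ *   (ss)add.f r4.x, r0.x, r1.x
+ *   (sy)mul.f r5.x, r2.x, r4.x
+ */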
struct ir3_legalize_ctx {
struct ir3_legalize_state *state = &bd->state;
bool last_input_needs_ss = false;
bool has_tex_prefetch = false;
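+/* With merged registers (a6xx+), half regs alias the full register file,
+ * so the regmasks track full and half regs in a single namespace:
+ */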
+ bool mergedregs = ctx->so->mergedregs;
/* our input state is the OR of all predecessor blocks' state: */
set_foreach(block->predecessors, entry) {
list_inithead(&block->instr_list);
foreach_instr_safe (n, &instr_list) {
- struct ir3_register *reg;
unsigned i;
n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);
if (last_n && is_barrier(last_n)) {
n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
last_input_needs_ss = false;
- regmask_init(&state->needs_ss_war);
- regmask_init(&state->needs_ss);
- regmask_init(&state->needs_sy);
+ regmask_init(&state->needs_ss_war, mergedregs);
+ regmask_init(&state->needs_ss, mergedregs);
+ regmask_init(&state->needs_sy, mergedregs);
}
- if (last_n && (last_n->opc == OPC_IF)) {
+ if (last_n && (last_n->opc == OPC_PREDT)) {
n->flags |= IR3_INSTR_SS;
- regmask_init(&state->needs_ss_war);
- regmask_init(&state->needs_ss);
+ regmask_init(&state->needs_ss_war, mergedregs);
+ regmask_init(&state->needs_ss, mergedregs);
}
/* NOTE: consider dst register too.. it could happen that
* resulting in undefined results:
*/
for (i = 0; i < n->regs_count; i++) {
- reg = n->regs[i];
+ struct ir3_register *reg = n->regs[i];
if (reg_gpr(reg)) {
if (regmask_get(&state->needs_ss, reg)) {
n->flags |= IR3_INSTR_SS;
last_input_needs_ss = false;
- regmask_init(&state->needs_ss_war);
- regmask_init(&state->needs_ss);
+ regmask_init(&state->needs_ss_war, mergedregs);
+ regmask_init(&state->needs_ss, mergedregs);
}
if (regmask_get(&state->needs_sy, reg)) {
n->flags |= IR3_INSTR_SY;
- regmask_init(&state->needs_sy);
+ regmask_init(&state->needs_sy, mergedregs);
}
}
}
if (n->regs_count > 0) {
- reg = n->regs[0];
+ struct ir3_register *reg = n->regs[0];
if (regmask_get(&state->needs_ss_war, reg)) {
n->flags |= IR3_INSTR_SS;
last_input_needs_ss = false;
- regmask_init(&state->needs_ss_war);
- regmask_init(&state->needs_ss);
+ regmask_init(&state->needs_ss_war, mergedregs);
+ regmask_init(&state->needs_ss, mergedregs);
}
if (last_rel && (reg->num == regid(REG_A0, 0))) {
ctx->type == MESA_SHADER_VERTEX && n->opc == OPC_SAMGQ) {
struct ir3_instruction *samgp;
+ list_delinit(&n->node);
+
for (i = 0; i < 4; i++) {
samgp = ir3_instr_clone(n);
samgp->opc = OPC_SAMGP0 + i;
if (i > 1)
samgp->flags |= IR3_INSTR_SY;
}
- list_delinit(&n->node);
} else {
list_addtail(&n->node, &block->instr_list);
}
- if (n->opc == OPC_DSXPP_1 || n->opc == OPC_DSYPP_1) {
- struct ir3_instruction *op_p = ir3_instr_clone(n);
- op_p->flags = IR3_INSTR_P;
-
- ctx->so->need_fine_derivatives = true;
- }
-
if (is_sfu(n))
regmask_set(&state->needs_ss, n->regs[0]);
- if (is_tex(n) || (n->opc == OPC_META_TEX_PREFETCH)) {
+ if (is_tex_or_prefetch(n)) {
regmask_set(&state->needs_sy, n->regs[0]);
- ctx->so->need_pixlod = true;
if (n->opc == OPC_META_TEX_PREFETCH)
has_tex_prefetch = true;
} else if (n->opc == OPC_RESINFO) {
* their src register(s):
*/
if (is_tex(n) || is_sfu(n) || is_mem(n)) {
- foreach_src(reg, n) {
+ foreach_src (reg, n) {
if (reg_gpr(reg))
regmask_set(&state->needs_ss_war, reg);
}
ir3_reg_create(baryf, regid(0, 0), 0);
/* insert the dummy bary.f after last_input: */
- list_delinit(&baryf->node);
- list_add(&baryf->node, &last_input->node);
+ ir3_instr_move_after(baryf, last_input);
last_input = baryf;
return true;
}
+/* Expands the dsxpp and dsypp macros to a back-to-back pair
+ * (shown for dsxpp; dsypp expands the same way):
+ *
+ * dsxpp.1 dst, src
+ * dsxpp.1.p dst, src
+ *
+ * We apply this after flags syncing, as we don't want to sync in between the
+ * two (which might happen if dst == src). We do it before nop scheduling
+ * because that needs to count actual instructions.
+ */
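+/* For example, with dst == src the ".p" clone reads the register just
+ * written by the first half, so running the sync pass afterwards would
+ * insert an unwanted sync flag between the pair (a sketch; the exact flag
+ * depends on how the opcode is classified):
+ *
+ *   dsxpp.1 r0.x, r0.x
+ *   (sy)dsxpp.1.p r0.x, r0.x   <- must not happen
+ */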
+static bool
+apply_fine_deriv_macro(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
+{
+ struct list_head instr_list;
+
+ /* remove all the instructions from the list, we'll be adding
+ * them back in as we go
+ */
+ list_replace(&block->instr_list, &instr_list);
+ list_inithead(&block->instr_list);
+
+ foreach_instr_safe (n, &instr_list) {
+ list_addtail(&n->node, &block->instr_list);
+
+ if (n->opc == OPC_DSXPP_MACRO || n->opc == OPC_DSYPP_MACRO) {
+ n->opc = (n->opc == OPC_DSXPP_MACRO) ? OPC_DSXPP_1 : OPC_DSYPP_1;
+
+ struct ir3_instruction *op_p = ir3_instr_clone(n);
+ op_p->flags = IR3_INSTR_P;
+
+ ctx->so->need_fine_derivatives = true;
+ }
+ }
+
+ return true;
+}
+
/* NOTE: branch instructions are always the last instruction(s)
* in the block. We take advantage of this as we resolve the
* branches, since "if (foo) break;" constructs turn into
/* create "else" branch first (since "then" block should
* frequently/always end up being a fall-thru):
*/
- br = ir3_BR(block, block->condition, 0);
+ br = ir3_B(block, block->condition, 0);
br->cat0.inv = true;
br->cat0.target = block->successors[1];
/* "then" branch: */
- br = ir3_BR(block, block->condition, 0);
+ br = ir3_B(block, block->condition, 0);
br->cat0.target = block->successors[0];
} else if (block->successors[0]) {
}
}
+/* Here we work around the fact that kill doesn't actually kill the thread as
+ * GL expects. The last instruction always needs to be an end instruction,
+ * which means that if we're stuck in a loop where kill is the only way out,
+ * then we may have to jump out to the end. kill may also have the d3d
+ * semantics of converting the thread to a helper thread, rather than setting
+ * the exec mask to 0, in which case the helper thread could get stuck in an
+ * infinite loop.
+ *
+ * We do this late, both to give the scheduler the opportunity to reschedule
+ * kill instructions earlier and to avoid having to create a separate basic
+ * block.
+ *
+ * TODO: Assuming that the wavefront doesn't stop as soon as all threads are
+ * killed, we might benefit from doing this more aggressively when the remaining
+ * part of the program after the kill is large, since that would let us
+ * skip over the instructions when there are no non-killed threads left.
+ */
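+/* Sketch of the rewrite (condition register and label are illustrative):
+ *
+ *   kill p0.x
+ *
+ * becomes:
+ *
+ *   kill p0.x
+ *   br p0.x, #end
+ *
+ * where #end is the last block in the shader.
+ */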
+static void
+kill_sched(struct ir3 *ir, struct ir3_shader_variant *so)
+{
+ /* True if we know that this block will always eventually lead to the end
+ * block:
+ */
+ bool always_ends = true;
+ bool added = false;
+ struct ir3_block *last_block =
+ list_last_entry(&ir->block_list, struct ir3_block, node);
+
+ foreach_block_rev (block, &ir->block_list) {
+ for (unsigned i = 0; i < 2 && block->successors[i]; i++) {
+ if (block->successors[i]->start_ip <= block->end_ip)
+ always_ends = false;
+ }
+
+ if (always_ends)
+ continue;
+
+ foreach_instr_safe (instr, &block->instr_list) {
+ if (instr->opc != OPC_KILL)
+ continue;
+
+ struct ir3_instruction *br = ir3_instr_create(block, OPC_B);
+ br->regs[1] = instr->regs[1];
+ br->cat0.target = last_block;
+
+ list_del(&br->node);
+ list_add(&br->node, &instr->node);
+
+ added = true;
+ }
+ }
+
+ if (added) {
+ /* I'm not entirely sure how the branchstack works, but we probably
+ * need to add at least one entry for the divergence which is resolved
+ * at the end:
+ */
+ so->branchstack++;
+
+ /* We don't update predecessors/successors, so we have to do this
+ * manually:
+ */
+ mark_jp(last_block);
+ }
+}
+
/* Insert nop's required to make this a legal/valid shader program: */
static void
nop_sched(struct ir3 *ir)
*/
if ((delay > 0) && (ir->compiler->gpu_id >= 600) && last &&
- ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
+ ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3)) &&
+ (last->repeat == 0)) {
/* the previous cat2/cat3 instruction can encode at most 3 nop's: */
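+ /* e.g. "add.f ...; nop; nop; sam ..." can become "(nop2)add.f ...; sam ...",
+ * (nopN) being how the disassembler shows folded delay slots:
+ */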
unsigned transfer = MIN2(delay, 3 - last->nop);
last->nop += transfer;
}
}
-void
+bool
ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
{
struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
+ bool mergedregs = so->mergedregs;
bool progress;
ctx->so = so;
/* allocate per-block data: */
foreach_block (block, &ir->block_list) {
- block->data = rzalloc(ctx, struct ir3_legalize_block_data);
+ struct ir3_legalize_block_data *bd =
+ rzalloc(ctx, struct ir3_legalize_block_data);
+
+ regmask_init(&bd->state.needs_ss_war, mergedregs);
+ regmask_init(&bd->state.needs_ss, mergedregs);
+ regmask_init(&bd->state.needs_sy, mergedregs);
+
+ block->data = bd;
}
ir3_remove_nops(ir);
*max_bary = ctx->max_bary;
block_sched(ir);
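+ /* kill instructions only occur in fragment shaders: */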
+ if (so->type == MESA_SHADER_FRAGMENT)
+ kill_sched(ir, so);
+
+ foreach_block (block, &ir->block_list) {
+ progress |= apply_fine_deriv_macro(ctx, block);
+ }
+
nop_sched(ir);
do {
mark_xvergence_points(ir);
ralloc_free(ctx);
+
+ return true;
}