struct ir3_legalize_ctx {
struct ir3_compiler *compiler;
+ struct ir3_shader_variant *so;
gl_shader_stage type;
- bool has_ssbo;
- bool need_pixlod;
int max_bary;
};
list_replace(&block->instr_list, &instr_list);
list_inithead(&block->instr_list);
- list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
+ foreach_instr_safe (n, &instr_list) {
struct ir3_register *reg;
unsigned i;
regmask_init(&state->needs_sy);
}
- if (last_n && (last_n->opc == OPC_CONDEND)) {
+ if (last_n && (last_n->opc == OPC_IF)) {
n->flags |= IR3_INSTR_SS;
regmask_init(&state->needs_ss_war);
regmask_init(&state->needs_ss);
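+ /* need to be able to set (ss) on the first instruction of the block: */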
if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
ir3_NOP(block);
- if (is_nop(n) && !list_is_empty(&block->instr_list)) {
- struct ir3_instruction *last = list_last_entry(&block->instr_list,
- struct ir3_instruction, node);
- if (is_nop(last) && (last->repeat < 5)) {
- last->repeat++;
- last->flags |= n->flags;
- continue;
- }
-
- /* NOTE: I think the nopN encoding works for a5xx and
- * probably a4xx, but not a3xx. So far only tested on
- * a6xx.
- */
- if ((ctx->compiler->gpu_id >= 600) && !n->flags && (last->nop < 3) &&
- ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
- last->nop++;
- continue;
- }
- }
-
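+ /* workaround: expand SAMGQ into four SAMGP0..SAMGP3 clones, replacing the original instruction: */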
if (ctx->compiler->samgq_workaround &&
ctx->type == MESA_SHADER_VERTEX && n->opc == OPC_SAMGQ) {
struct ir3_instruction *samgp;
+ list_delinit(&n->node);
+
for (i = 0; i < 4; i++) {
samgp = ir3_instr_clone(n);
samgp->opc = OPC_SAMGP0 + i;
if (i > 1)
samgp->flags |= IR3_INSTR_SY;
}
- list_delinit(&n->node);
} else {
list_addtail(&n->node, &block->instr_list);
}
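+ /* fine derivatives: follow dsxpp.1/dsypp.1 with a clone carrying IR3_INSTR_P, and record that this variant needs fine derivatives: */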
+ if (n->opc == OPC_DSXPP_1 || n->opc == OPC_DSYPP_1) {
+ struct ir3_instruction *op_p = ir3_instr_clone(n);
+ op_p->flags = IR3_INSTR_P;
+
+ ctx->so->need_fine_derivatives = true;
+ }
+
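+ /* track which result registers still have an outstanding sync: sfu results are waited on with (ss), tex/prefetch results with (sy): */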
if (is_sfu(n))
regmask_set(&state->needs_ss, n->regs[0]);
- if (is_tex(n) || (n->opc == OPC_META_TEX_PREFETCH)) {
+ if (is_tex_or_prefetch(n)) {
regmask_set(&state->needs_sy, n->regs[0]);
- ctx->need_pixlod = true;
if (n->opc == OPC_META_TEX_PREFETCH)
has_tex_prefetch = true;
} else if (n->opc == OPC_RESINFO) {
}
if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
- ctx->has_ssbo = true;
+ ctx->so->has_ssbo = true;
/* both tex/sfu appear to not always immediately consume
* their src register(s):
*/
if (is_tex(n) || is_sfu(n) || is_mem(n)) {
- foreach_src(reg, n) {
+ foreach_src (reg, n) {
if (reg_gpr(reg))
regmask_set(&state->needs_ss_war, reg);
}
static bool
resolve_jumps(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
+ foreach_block (block, &ir->block_list)
+ foreach_instr (instr, &block->instr_list)
if (is_flow(instr) && instr->cat0.target)
if (resolve_jump(instr))
return true;
static void
mark_xvergence_points(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
if (block->predecessors->entries > 1) {
/* if a block has more than one possible predecessor, then
* the first instruction is a convergence point.
}
}
+/* Insert the branch/jump instructions for flow control between blocks.
+ * Initially this is done naively, without considering if the successor
+ * block immediately follows the current block (i.e. no jump required),
+ * but that is cleaned up in resolve_jumps().
+ *
+ * TODO what ensures that the last write to p0.x in a block is the
+ * branch condition? Have we been getting lucky all this time?
+ */
+static void
+block_sched(struct ir3 *ir)
+{
+ foreach_block (block, &ir->block_list) {
+ if (block->successors[1]) {
+ /* if/else, conditional branches to "then" or "else": */
+ struct ir3_instruction *br;
+
+ debug_assert(block->condition);
+
+ /* create "else" branch first (since "then" block should
+ * frequently/always end up being a fall-thru):
+ */
+ br = ir3_BR(block, block->condition, 0);
+ br->cat0.inv = true;
+ br->cat0.target = block->successors[1];
+
+ /* "then" branch: */
+ br = ir3_BR(block, block->condition, 0);
+ br->cat0.target = block->successors[0];
+
+ } else if (block->successors[0]) {
+ /* otherwise unconditional jump to next block: */
+ struct ir3_instruction *jmp;
+
+ jmp = ir3_JUMP(block);
+ jmp->cat0.target = block->successors[0];
+ }
+ }
+}
+
+/* Insert the nops required to make this a legal/valid shader program: */
+static void
+nop_sched(struct ir3 *ir)
+{
+ foreach_block (block, &ir->block_list) {
+ struct ir3_instruction *last = NULL;
+ struct list_head instr_list;
+
+ /* remove all the instructions from the list; we'll be adding
+ * them back in as we go
+ */
+ list_replace(&block->instr_list, &instr_list);
+ list_inithead(&block->instr_list);
+
+ foreach_instr_safe (instr, &instr_list) {
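+ /* how many cycles of delay this instruction needs before it can legally issue: */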
+ unsigned delay = ir3_delay_calc(block, instr, false, true);
+
+ /* NOTE: I think the nopN encoding works for a5xx and
+ * probably a4xx, but not a3xx. So far only tested on
+ * a6xx.
+ */
+
+ if ((delay > 0) && (ir->compiler->gpu_id >= 600) && last &&
+ ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
+ /* the previous cat2/cat3 instruction can encode at most 3 nops: */
+ unsigned transfer = MIN2(delay, 3 - last->nop);
+ last->nop += transfer;
+ delay -= transfer;
+ }
+
+ if ((delay > 0) && last && (last->opc == OPC_NOP)) {
+ /* the previous nop can encode at most 5 repeats: */
+ unsigned transfer = MIN2(delay, 5 - last->repeat);
+ last->repeat += transfer;
+ delay -= transfer;
+ }
+
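+ /* whatever is left over becomes an explicit nop; a nop with repeat r covers r+1 cycles, hence delay - 1: */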
+ if (delay > 0) {
+ debug_assert(delay <= 6);
+ ir3_NOP(block)->repeat = delay - 1;
+ }
+
+ list_addtail(&instr->node, &block->instr_list);
+ last = instr;
+ }
+ }
+}
+
void
-ir3_legalize(struct ir3 *ir, bool *has_ssbo, bool *need_pixlod, int *max_bary)
+ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
{
struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
bool progress;
+ ctx->so = so;
ctx->max_bary = -1;
ctx->compiler = ir->compiler;
ctx->type = ir->type;
/* allocate per-block data: */
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
block->data = rzalloc(ctx, struct ir3_legalize_block_data);
}
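+ /* drop any pre-existing nops; the required delays are re-inserted by nop_sched() below: */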
+ ir3_remove_nops(ir);
+
/* process each block: */
do {
progress = false;
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
progress |= legalize_block(ctx, block);
}
} while (progress);
- *has_ssbo = ctx->has_ssbo;
- *need_pixlod = ctx->need_pixlod;
*max_bary = ctx->max_bary;
+ block_sched(ir);
+ nop_sched(ir);
+
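+ /* resolving a jump can delete it entirely (when the target just falls through), which shifts instruction offsets, so re-count and repeat until nothing changes: */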
do {
ir3_count_instructions(ir);
} while(resolve_jumps(ir));