ir3: Plumb through bindless support
[mesa.git] / src / freedreno / ir3 / ir3_legalize.c
index f015c6fede8813e1806cb13bddd8c9e7546a8d3c..bed8cd1c03a8df9c88bd31ea73f8f9200f08ef84 100644
@@ -41,7 +41,8 @@
 
 struct ir3_legalize_ctx {
        struct ir3_compiler *compiler;
-       bool has_ssbo;
+       struct ir3_shader_variant *so;
+       gl_shader_stage type;
        int max_bary;
 };
 
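
With the shader variant and stage carried in the legalize context, callers now hand in the variant itself instead of an out-pointer for has_ssbo. A hypothetical call-site sketch under that assumption (the real caller lives in ir3_compiler_nir.c; the fragment-shader total_in derivation is an assumption based on how max_bary is used there):

    int max_bary;
    ir3_legalize(ir, so, &max_bary);
    /* assumption: fragment shaders derive their input count from the
     * highest bary.f inloc seen during legalization:
     */
    if (so->type == MESA_SHADER_FRAGMENT)
            so->total_in = max_bary + 1;
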
@@ -85,10 +86,13 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
        struct list_head instr_list;
        struct ir3_legalize_state prev_state = bd->state;
        struct ir3_legalize_state *state = &bd->state;
+       bool last_input_needs_ss = false;
+       bool has_tex_prefetch = false;
 
        /* our input state is the OR of all predecessor blocks' state: */
-       for (unsigned i = 0; i < block->predecessors_count; i++) {
-               struct ir3_legalize_block_data *pbd = block->predecessors[i]->data;
+       set_foreach(block->predecessors, entry) {
+               struct ir3_block *predecessor = (struct ir3_block *)entry->key;
+               struct ir3_legalize_block_data *pbd = predecessor->data;
                struct ir3_legalize_state *pstate = &pbd->state;
 
                /* Our input (ss)/(sy) state is based on OR'ing the output
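
The context lines elided just below OR each predecessor's output sync state into this block's input state. A minimal sketch of that merge, assuming the regmask_or() helper from ir3.h (not the verbatim source):

    regmask_or(&state->needs_ss,     &state->needs_ss,     &pstate->needs_ss);
    regmask_or(&state->needs_ss_war, &state->needs_ss_war, &pstate->needs_ss_war);
    regmask_or(&state->needs_sy,     &state->needs_sy,     &pstate->needs_sy);
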
@@ -108,13 +112,16 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
        list_replace(&block->instr_list, &instr_list);
        list_inithead(&block->instr_list);
 
-       list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
+       foreach_instr_safe (n, &instr_list) {
                struct ir3_register *reg;
                unsigned i;
 
                n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);
 
-               if (is_meta(n))
+               /* _meta::tex_prefetch instructions are removed later in
+                * collect_tex_prefetches()
+                */
+               if (is_meta(n) && (n->opc != OPC_META_TEX_PREFETCH))
                        continue;
 
                if (is_input(n)) {
@@ -123,8 +130,19 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                        ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
                }
 
-               if (last_n && is_barrier(last_n))
+               if (last_n && is_barrier(last_n)) {
                        n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
+                       last_input_needs_ss = false;
+                       regmask_init(&state->needs_ss_war);
+                       regmask_init(&state->needs_ss);
+                       regmask_init(&state->needs_sy);
+               }
+
+               if (last_n && (last_n->opc == OPC_IF)) {
+                       n->flags |= IR3_INSTR_SS;
+                       regmask_init(&state->needs_ss_war);
+                       regmask_init(&state->needs_ss);
+               }
 
                /* NOTE: consider dst register too.. it could happen that
                 * texture sample instruction (for example) writes some
@@ -143,6 +161,7 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                                 */
                                if (regmask_get(&state->needs_ss, reg)) {
                                        n->flags |= IR3_INSTR_SS;
+                                       last_input_needs_ss = false;
                                        regmask_init(&state->needs_ss_war);
                                        regmask_init(&state->needs_ss);
                                }
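
To make the bookkeeping above concrete, the two sync bits guard different asynchronous producers; an illustrative ir3 sequence (not from this commit, semantics as described by the comments in this file):

    /* (sy): a consumer of an in-flight texture fetch must stall:
     *     sam r0.x, ...
     *     (sy)add.f r1.x, r0.x, r0.y
     *
     * (ss): a consumer of an sfu (or ldlv/ldl) result waits on the
     * scoreboard instead:
     *     rcp r0.x, r0.y
     *     (ss)mov.f32f32 r1.y, r0.x
     */
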
@@ -165,6 +184,7 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                        reg = n->regs[0];
                        if (regmask_get(&state->needs_ss_war, reg)) {
                                n->flags |= IR3_INSTR_SS;
+                               last_input_needs_ss = false;
                                regmask_init(&state->needs_ss_war);
                                regmask_init(&state->needs_ss);
                        }
@@ -188,44 +208,48 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                }
 
                /* need to be able to set (ss) on first instruction: */
-               if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
+               if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
                        ir3_NOP(block);
 
-               if (is_nop(n) && !list_empty(&block->instr_list)) {
-                       struct ir3_instruction *last = list_last_entry(&block->instr_list,
-                                       struct ir3_instruction, node);
-                       if (is_nop(last) && (last->repeat < 5)) {
-                               last->repeat++;
-                               last->flags |= n->flags;
-                               continue;
-                       }
+               if (ctx->compiler->samgq_workaround &&
+                       ctx->type == MESA_SHADER_VERTEX && n->opc == OPC_SAMGQ) {
+                       struct ir3_instruction *samgp;
 
-                       /* NOTE: I think the nopN encoding works for a5xx and
-                        * probably a4xx, but not a3xx.  So far only tested on
-                        * a6xx.
-                        */
-                       if ((ctx->compiler->gpu_id >= 600) && !n->flags && (last->nop < 3) &&
-                                       ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
-                               last->nop++;
-                               continue;
+                       list_delinit(&n->node);
+
+                       for (i = 0; i < 4; i++) {
+                               samgp = ir3_instr_clone(n);
+                               samgp->opc = OPC_SAMGP0 + i;
+                               if (i > 1)
+                                       samgp->flags |= IR3_INSTR_SY;
                        }
+               } else {
+                       list_addtail(&n->node, &block->instr_list);
                }
 
-               list_addtail(&n->node, &block->instr_list);
+               if (n->opc == OPC_DSXPP_1 || n->opc == OPC_DSYPP_1) {
+                       struct ir3_instruction *op_p = ir3_instr_clone(n);
+                       op_p->flags = IR3_INSTR_P;
+
+                       ctx->so->need_fine_derivatives = true;
+               }
 
                if (is_sfu(n))
                        regmask_set(&state->needs_ss, n->regs[0]);
 
-               if (is_tex(n)) {
+               if (is_tex_or_prefetch(n)) {
                        regmask_set(&state->needs_sy, n->regs[0]);
+                       if (n->opc == OPC_META_TEX_PREFETCH)
+                               has_tex_prefetch = true;
                } else if (n->opc == OPC_RESINFO) {
                        regmask_set(&state->needs_ss, n->regs[0]);
                        ir3_NOP(block)->flags |= IR3_INSTR_SS;
+                       last_input_needs_ss = false;
                } else if (is_load(n)) {
                        /* ldlv seems to need the (ss) bit instead, which is odd, but
                         * it makes a bunch of flat-varying tests start working on a4xx.
                         */
-                       if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL))
+                       if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL) || (n->opc == OPC_LDLW))
                                regmask_set(&state->needs_ss, n->regs[0]);
                        else
                                regmask_set(&state->needs_sy, n->regs[0]);
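
The samgq workaround above expands a single gather-all into four per-component gathers; the loop appears to rely on ir3_instr_clone() appending each clone to the block (its behavior elsewhere in ir3), which is why only the else-branch needs an explicit list_addtail(). Schematically:

    /* samgq dst, src  becomes (per the i > 1 check, the later two
     * clones carry (sy)):
     *     samgp0 dst, src
     *     samgp1 dst, src
     *     (sy)samgp2 dst, src
     *     (sy)samgp3 dst, src
     */
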
@@ -243,25 +267,29 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                }
 
                if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
-                       ctx->has_ssbo = true;
+                       ctx->so->has_ssbo = true;
 
                /* both tex/sfu appear to not always immediately consume
                 * their src register(s):
                 */
                if (is_tex(n) || is_sfu(n) || is_mem(n)) {
-                       foreach_src(reg, n) {
+                       foreach_src (reg, n) {
                                if (reg_gpr(reg))
                                        regmask_set(&state->needs_ss_war, reg);
                        }
                }
 
-               if (is_input(n))
+               if (is_input(n)) {
                        last_input = n;
+                       last_input_needs_ss |= (n->opc == OPC_LDLV);
+               }
 
                last_n = n;
        }
 
        if (last_input) {
+               assert(block == list_first_entry(&block->shader->block_list,
+                               struct ir3_block, node));
                /* special hack.. if using ldlv to bypass interpolation,
                 * we need to insert a dummy bary.f on which we can set
                 * the (ei) flag:
@@ -271,7 +299,6 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
 
                        /* (ss)bary.f (ei)r63.x, 0, r0.x */
                        baryf = ir3_instr_create(block, OPC_BARY_F);
-                       baryf->flags |= IR3_INSTR_SS;
                        ir3_reg_create(baryf, regid(63, 0), 0);
                        ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
                        ir3_reg_create(baryf, regid(0, 0), 0);
@@ -281,8 +308,31 @@ legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
                        list_add(&baryf->node, &last_input->node);
 
                        last_input = baryf;
+
+                       /* by definition, we need (ss) since we are inserting
+                        * the dummy bary.f immediately after the ldlv:
+                        */
+                       last_input_needs_ss = true;
                }
                last_input->regs[0]->flags |= IR3_REG_EI;
+               if (last_input_needs_ss)
+                       last_input->flags |= IR3_INSTR_SS;
+       } else if (has_tex_prefetch) {
+               /* texture prefetch, but *no* inputs.. we need to insert a
+                * dummy bary.f at the top of the shader to unblock varying
+                * storage:
+                */
+               struct ir3_instruction *baryf;
+
+               /* (ss)bary.f (ei)r63.x, 0, r0.x */
+               baryf = ir3_instr_create(block, OPC_BARY_F);
+               ir3_reg_create(baryf, regid(63, 0), 0)->flags |= IR3_REG_EI;
+               ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
+               ir3_reg_create(baryf, regid(0, 0), 0);
+
+               /* insert the dummy bary.f at head: */
+               list_delinit(&baryf->node);
+               list_add(&baryf->node, &block->instr_list);
        }
 
        if (last_rel)
@@ -355,7 +405,7 @@ resolve_dest_block(struct ir3_block *block)
         *   (2) (block-is-empty || only-instr-is-jump)
         */
        if (block->successors[1] == NULL) {
-               if (list_empty(&block->instr_list)) {
+               if (list_is_empty(&block->instr_list)) {
                        return block->successors[0];
                } else if (list_length(&block->instr_list) == 1) {
                        struct ir3_instruction *instr = list_first_entry(
@@ -367,6 +417,47 @@ resolve_dest_block(struct ir3_block *block)
        return block;
 }
 
+static void
+remove_unused_block(struct ir3_block *old_target)
+{
+       list_delinit(&old_target->node);
+
+       /* cleanup dangling predecessors: */
+       for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
+               if (old_target->successors[i]) {
+                       struct ir3_block *succ = old_target->successors[i];
+                       _mesa_set_remove_key(succ->predecessors, old_target);
+               }
+       }
+}
+
+static void
+retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
+{
+       struct ir3_block *old_target = instr->cat0.target;
+       struct ir3_block *cur_block = instr->block;
+
+       /* update the current block's successors to reflect the retargeting: */
+       if (cur_block->successors[0] == old_target) {
+               cur_block->successors[0] = new_target;
+       } else {
+               debug_assert(cur_block->successors[1] == old_target);
+               cur_block->successors[1] = new_target;
+       }
+
+       /* update new target's predecessors: */
+       _mesa_set_add(new_target->predecessors, cur_block);
+
+       /* and remove old_target's predecessor: */
+       debug_assert(_mesa_set_search(old_target->predecessors, cur_block));
+       _mesa_set_remove_key(old_target->predecessors, cur_block);
+
+       if (old_target->predecessors->entries == 0)
+               remove_unused_block(old_target);
+
+       instr->cat0.target = new_target;
+}
+
 static bool
 resolve_jump(struct ir3_instruction *instr)
 {
@@ -375,8 +466,7 @@ resolve_jump(struct ir3_instruction *instr)
        struct ir3_instruction *target;
 
        if (tblock != instr->cat0.target) {
-               list_delinit(&instr->cat0.target->node);
-               instr->cat0.target = tblock;
+               retarget_jump(instr, tblock);
                return true;
        }
 
@@ -398,7 +488,7 @@ resolve_jump(struct ir3_instruction *instr)
        else
                next_block = 1;
 
-       if ((!target) || (target->ip == (instr->ip + next_block))) {
+       if (target->ip == (instr->ip + next_block)) {
                list_delinit(&instr->node);
                return true;
        } else {
@@ -417,8 +507,8 @@ resolve_jump(struct ir3_instruction *instr)
 static bool
 resolve_jumps(struct ir3 *ir)
 {
-       list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
-               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
+       foreach_block (block, &ir->block_list)
+               foreach_instr (instr, &block->instr_list)
                        if (is_flow(instr) && instr->cat0.target)
                                if (resolve_jump(instr))
                                        return true;
@@ -426,80 +516,166 @@ resolve_jumps(struct ir3 *ir)
        return false;
 }
 
-/* we want to mark points where divergent flow control re-converges
- * with (jp) flags.  For now, since we don't do any optimization for
- * things that start out as a 'do {} while()', re-convergence points
- * will always be a branch or jump target.  Note that this is overly
- * conservative, since unconditional jump targets are not convergence
- * points, we are just assuming that the other path to reach the jump
- * target was divergent.  If we were clever enough to optimize the
- * jump at end of a loop back to a conditional branch into a single
- * conditional branch, ie. like:
+static void mark_jp(struct ir3_block *block)
+{
+       struct ir3_instruction *target = list_first_entry(&block->instr_list,
+                       struct ir3_instruction, node);
+       target->flags |= IR3_INSTR_JP;
+}
+
+/* Mark points where control flow converges or diverges.
  *
- *    add.f r1.w, r0.x, (neg)(r)c2.x   <= loop start
- *    mul.f r1.z, r1.z, r0.x
- *    mul.f r1.y, r1.y, r0.x
- *    mul.f r0.z, r1.x, r0.x
- *    mul.f r0.w, r0.y, r0.x
- *    cmps.f.ge r0.x, (r)c2.y, (r)r1.w
- *    add.s r0.x, (r)r0.x, (r)-1
- *    sel.f32 r0.x, (r)c3.y, (r)r0.x, c3.x
- *    cmps.f.eq p0.x, r0.x, c3.y
- *    mov.f32f32 r0.x, r1.w
- *    mov.f32f32 r0.y, r0.w
- *    mov.f32f32 r1.x, r0.z
- *    (rpt2)nop
- *    br !p0.x, #-13
- *    (jp)mul.f r0.x, c263.y, r1.y
+ * Divergence points could actually be re-convergence points where
+ * "parked" threads are recoverged with threads that took the opposite
+ * path last time around.  Possibly it is easier to think of (jp) as
+ * "the execution mask might have changed".
+ */
+static void
+mark_xvergence_points(struct ir3 *ir)
+{
+       foreach_block (block, &ir->block_list) {
+               if (block->predecessors->entries > 1) {
+                       /* if a block has more than one possible predecessor, then
+                        * the first instruction is a convergence point.
+                        */
+                       mark_jp(block);
+               } else if (block->predecessors->entries == 1) {
+                       /* if a block has a single predecessor, and that predecessor
+                        * has multiple possible successors, then its first instruction
+                        * is a divergence point.
+                        */
+                       set_foreach(block->predecessors, entry) {
+                               struct ir3_block *predecessor = (struct ir3_block *)entry->key;
+                               if (predecessor->successors[1]) {
+                                       mark_jp(block);
+                               }
+                       }
+               }
+       }
+}
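
For a simple if/else this marks both sides and the merge block; an illustrative CFG:

    /*        [header]          successors = {then, else}
     *        /      \
     *   [then]      [else]     one predecessor (header) with two
     *        \      /          successors => first instr gets (jp)
     *        [merge]           two predecessors => first instr gets (jp)
     */
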
+
+/* Insert the branch/jump instructions for flow control between blocks.
+ * Initially this is done naively, without considering if the successor
+ * block immediately follows the current block (ie. so no jump required),
+ * but that is cleaned up in resolve_jumps().
  *
- * Then we'd have to be more clever, as the convergence point is no
- * longer a branch or jump target.
+ * TODO what ensures that the last write to p0.x in a block is the
+ * branch condition?  Have we been getting lucky all this time?
  */
 static void
-mark_convergence_points(struct ir3 *ir)
+block_sched(struct ir3 *ir)
+{
+       foreach_block (block, &ir->block_list) {
+               if (block->successors[1]) {
+                       /* if/else, conditional branches to "then" or "else": */
+                       struct ir3_instruction *br;
+
+                       debug_assert(block->condition);
+
+                       /* create "else" branch first (since "then" block should
+                        * frequently/always end up being a fall-thru):
+                        */
+                       br = ir3_BR(block, block->condition, 0);
+                       br->cat0.inv = true;
+                       br->cat0.target = block->successors[1];
+
+                       /* "then" branch: */
+                       br = ir3_BR(block, block->condition, 0);
+                       br->cat0.target = block->successors[0];
+
+               } else if (block->successors[0]) {
+                       /* otherwise unconditional jump to next block: */
+                       struct ir3_instruction *jmp;
+
+                       jmp = ir3_JUMP(block);
+                       jmp->cat0.target = block->successors[0];
+               }
+       }
+}
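
So for an if/else, the header block ends with an inverted branch to the "else" successor followed by a branch to the "then" successor; resolve_jumps() later deletes whichever branch targets the immediately following block. Roughly (a sketch of the emitted flow, not verbatim output):

    /* end of header block, before resolve_jumps():
     *     br !p0.x, #else_block     (cat0.inv set, successors[1])
     *     br p0.x, #then_block      (successors[0]; usually the
     *                                fall-thru, so resolve_jumps()
     *                                removes it)
     */
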
+
+/* Insert the nops required to make this a legal/valid shader program: */
+static void
+nop_sched(struct ir3 *ir)
 {
-       list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
-               list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
-                       if (is_flow(instr) && instr->cat0.target) {
-                               struct ir3_instruction *target =
-                                       list_first_entry(&instr->cat0.target->instr_list,
-                                                       struct ir3_instruction, node);
-                               target->flags |= IR3_INSTR_JP;
+       foreach_block (block, &ir->block_list) {
+               struct ir3_instruction *last = NULL;
+               struct list_head instr_list;
+
+               /* remove all the instructions from the list, we'll be adding
+                * them back in as we go
+                */
+               list_replace(&block->instr_list, &instr_list);
+               list_inithead(&block->instr_list);
+
+               foreach_instr_safe (instr, &instr_list) {
+                       unsigned delay = ir3_delay_calc(block, instr, false, true);
+
+                       /* NOTE: I think the nopN encoding works for a5xx and
+                        * probably a4xx, but not a3xx.  So far only tested on
+                        * a6xx.
+                        */
+
+                       if ((delay > 0) && (ir->compiler->gpu_id >= 600) && last &&
+                                       ((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
+                               /* the previous cat2/cat3 instruction can encode at most 3 nops: */
+                               unsigned transfer = MIN2(delay, 3 - last->nop);
+                               last->nop += transfer;
+                               delay -= transfer;
+                       }
+
+                       if ((delay > 0) && last && (last->opc == OPC_NOP)) {
+                               /* the previous nop can encode at most 5 repeats: */
+                               unsigned transfer = MIN2(delay, 5 - last->repeat);
+                               last->repeat += transfer;
+                               delay -= transfer;
                        }
+
+                       if (delay > 0) {
+                               debug_assert(delay <= 6);
+                               ir3_NOP(block)->repeat = delay - 1;
+                       }
+
+                       list_addtail(&instr->node, &block->instr_list);
+                       last = instr;
                }
        }
 }
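
A worked instance of the folding above, assuming the a6xx path with delay == 5 and a preceding cat2 ALU whose nop field is empty:

    /* transfer = MIN2(5, 3 - last->nop) = 3;  -> folded into last->nop
     * delay -= 3;                             -> 2 cycles still owed
     * last is not an OPC_NOP, so the repeat-folding branch is skipped;
     * ir3_NOP(block)->repeat = 2 - 1;         -> (rpt1)nop covers both
     */
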
 
 void
-ir3_legalize(struct ir3 *ir, bool *has_ssbo, int *max_bary)
+ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
 {
        struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
        bool progress;
 
+       ctx->so = so;
        ctx->max_bary = -1;
        ctx->compiler = ir->compiler;
+       ctx->type = ir->type;
 
        /* allocate per-block data: */
-       list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+       foreach_block (block, &ir->block_list) {
                block->data = rzalloc(ctx, struct ir3_legalize_block_data);
        }
 
+       ir3_remove_nops(ir);
+
        /* process each block: */
        do {
                progress = false;
-               list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+               foreach_block (block, &ir->block_list) {
                        progress |= legalize_block(ctx, block);
                }
        } while (progress);
 
-       *has_ssbo = ctx->has_ssbo;
        *max_bary = ctx->max_bary;
 
+       block_sched(ir);
+       nop_sched(ir);
+
        do {
                ir3_count_instructions(ir);
        } while(resolve_jumps(ir));
 
-       mark_convergence_points(ir);
+       mark_xvergence_points(ir);
 
        ralloc_free(ctx);
 }