};
} midgard_instruction;
-typedef struct midgard_block {
+typedef struct pan_block {
/* Link to next block. Must be first for mir_get_block */
struct list_head link;
- /* List of midgard_instructions emitted for the current block */
+ /* List of instructions emitted for the current block */
struct list_head instructions;
/* Index of the block in source order */
unsigned name;
- bool scheduled;
-
- /* List of midgard_bundles emitted (after the scheduler has run) */
- struct util_dynarray bundles;
-
- /* Number of quadwords _actually_ emitted, as determined after scheduling */
- unsigned quadword_count;
-
- /* Succeeding blocks. The compiler should not necessarily rely on
- * source-order traversal */
- struct midgard_block *successors[2];
+ /* Control flow graph */
+ struct pan_block *successors[2];
unsigned nr_successors;
struct set *predecessors;
* simple bit fields, but for us, liveness is a vector idea. */
uint16_t *live_in;
uint16_t *live_out;
+} pan_block;
+
+typedef struct midgard_block {
+ pan_block base;
+
+ bool scheduled;
+
+ /* List of midgard_bundles emitted (after the scheduler has run) */
+ struct util_dynarray bundles;
+
+ /* Number of quadwords _actually_ emitted, as determined after scheduling */
+ unsigned quadword_count;
/* Indicates this is a fixed-function fragment epilogue block */
bool epilogue;
emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins)
{
midgard_instruction *u = mir_upload_ins(ctx, ins);
- list_addtail(&u->link, &ctx->current_block->instructions);
+ list_addtail(&u->link, &ctx->current_block->base.instructions);
return u;
}
}
#define mir_foreach_block(ctx, v) \
- list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)
+ list_for_each_entry(pan_block, v, &ctx->blocks, link)
#define mir_foreach_block_from(ctx, from, v) \
- list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
+ list_for_each_entry_from(pan_block, v, &from->base, &ctx->blocks, link)
#define mir_foreach_instr_in_block(block, v) \
- list_for_each_entry(struct midgard_instruction, v, &block->instructions, link)
+ list_for_each_entry(struct midgard_instruction, v, &block->base.instructions, link)
#define mir_foreach_instr_in_block_rev(block, v) \
+ list_for_each_entry_rev(struct midgard_instruction, v, &block->base.instructions, link)
+
+#define pan_foreach_instr_in_block_rev(block, v) \
list_for_each_entry_rev(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_safe(block, v) \
- list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
+ list_for_each_entry_safe(struct midgard_instruction, v, &block->base.instructions, link)
#define mir_foreach_instr_in_block_safe_rev(block, v) \
- list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)
+ list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->base.instructions, link)
#define mir_foreach_instr_in_block_from(block, v, from) \
- list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)
+ list_for_each_entry_from(struct midgard_instruction, v, from, &block->base.instructions, link)
#define mir_foreach_instr_in_block_from_rev(block, v, from) \
- list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)
+ list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->base.instructions, link)
#define mir_foreach_bundle_in_block(block, v) \
util_dynarray_foreach(&block->bundles, midgard_bundle, v)
#define mir_foreach_instr_global(ctx, v) \
mir_foreach_block(ctx, v_block) \
- mir_foreach_instr_in_block(v_block, v)
+ mir_foreach_instr_in_block(((midgard_block *) v_block), v)
#define mir_foreach_instr_global_safe(ctx, v) \
mir_foreach_block(ctx, v_block) \
- mir_foreach_instr_in_block_safe(v_block, v)
+ mir_foreach_instr_in_block_safe(((midgard_block *) v_block), v)
#define mir_foreach_successor(blk, v) \
struct midgard_block *v; \
struct midgard_block **_v; \
- for (_v = &blk->successors[0], \
+        for (_v = (struct midgard_block **) &blk->base.successors[0], \
+             v = *_v; \
+             v != NULL && _v < (struct midgard_block **) &blk->base.successors[2]; \
+ _v++, v = *_v) \
+
+#define pan_foreach_successor(blk, v) \
+ pan_block *v; \
+ pan_block **_v; \
+ for (_v = (pan_block **) &blk->successors[0], \
v = *_v; \
- v != NULL && _v < &blk->successors[2]; \
+ v != NULL && _v < (pan_block **) &blk->successors[2]; \
_v++, v = *_v) \
/* Based on set_foreach, expanded with automatic type casts */
#define mir_foreach_predecessor(blk, v) \
struct set_entry *_entry_##v; \
struct midgard_block *v; \
- for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
+ for (_entry_##v = _mesa_set_next_entry(blk->base.predecessors, NULL), \
v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL); \
_entry_##v != NULL; \
- _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
+ _entry_##v = _mesa_set_next_entry(blk->base.predecessors, _entry_##v), \
v = (struct midgard_block *) (_entry_##v ? _entry_##v->key : NULL))
+#define pan_foreach_predecessor(blk, v) \
+ struct set_entry *_entry_##v; \
+ struct pan_block *v; \
+ for (_entry_##v = _mesa_set_next_entry(blk->predecessors, NULL), \
+ v = (struct pan_block *) (_entry_##v ? _entry_##v->key : NULL); \
+ _entry_##v != NULL; \
+ _entry_##v = _mesa_set_next_entry(blk->predecessors, _entry_##v), \
+ v = (struct pan_block *) (_entry_##v ? _entry_##v->key : NULL))
+
#define mir_foreach_src(ins, v) \
for (unsigned v = 0; v < ARRAY_SIZE(ins->src); ++v)
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
- return list_last_entry(&block->instructions, struct midgard_instruction, link);
+ return list_last_entry(&block->base.instructions, struct midgard_instruction, link);
}
static inline midgard_block *
static inline midgard_block *
mir_exit_block(struct compiler_context *ctx)
{
- midgard_block *last = list_last_entry(&ctx->blocks,
- struct midgard_block, link);
+ pan_block *last = list_last_entry(&ctx->blocks, pan_block, link);
/* The last block must be empty logically but contains branch writeout
* for fragment shaders */
assert(last->nr_successors == 0);
- return last;
+ return (midgard_block *) last;
}
static inline bool
{
midgard_block *blk = rzalloc(ctx, midgard_block);
- blk->predecessors = _mesa_set_create(blk,
+ blk->base.predecessors = _mesa_set_create(blk,
_mesa_hash_pointer,
_mesa_key_pointer_equal);
- blk->name = ctx->block_source_count++;
+ blk->base.name = ctx->block_source_count++;
return blk;
}
static void
-midgard_block_add_successor(midgard_block *block, midgard_block *successor)
+pan_block_add_successor(pan_block *block, pan_block *successor)
{
assert(block);
assert(successor);
/* Deduplicate */
for (unsigned i = 0; i < block->nr_successors; ++i) {
- if (block->successors[i] == successor)
+ if ((pan_block *) block->successors[i] == successor)
return;
}
midgard_block *temp = ctx->after_block;
ctx->after_block = create_empty_block(ctx);
ctx->block_count++;
- list_addtail(&ctx->after_block->link, &ctx->blocks);
- list_inithead(&ctx->after_block->instructions);
- midgard_block_add_successor(ctx->current_block, ctx->after_block);
+ list_addtail(&ctx->after_block->base.link, &ctx->blocks);
+ list_inithead(&ctx->after_block->base.instructions);
+ pan_block_add_successor(&ctx->current_block->base, &ctx->after_block->base);
ctx->current_block = ctx->after_block;
ctx->after_block = temp;
}
if (!this_block)
this_block = create_empty_block(ctx);
- list_addtail(&this_block->link, &ctx->blocks);
+ list_addtail(&this_block->base.link, &ctx->blocks);
this_block->scheduled = false;
++ctx->block_count;
/* Set up current block */
- list_inithead(&this_block->instructions);
+ list_inithead(&this_block->base.instructions);
ctx->current_block = this_block;
nir_foreach_instr(instr, block) {
ctx->after_block = create_empty_block(ctx);
- midgard_block_add_successor(before_block, then_block);
- midgard_block_add_successor(before_block, else_block);
+ pan_block_add_successor(&before_block->base, &then_block->base);
+ pan_block_add_successor(&before_block->base, &else_block->base);
- midgard_block_add_successor(end_then_block, ctx->after_block);
- midgard_block_add_successor(end_else_block, ctx->after_block);
+ pan_block_add_successor(&end_then_block->base, &ctx->after_block->base);
+ pan_block_add_successor(&end_else_block->base, &ctx->after_block->base);
}
static void
emit_mir_instruction(ctx, br_back);
/* Mark down that branch in the graph. */
- midgard_block_add_successor(start_block, loop_block);
- midgard_block_add_successor(ctx->current_block, loop_block);
+ pan_block_add_successor(&start_block->base, &loop_block->base);
+ pan_block_add_successor(&ctx->current_block->base, &loop_block->base);
/* Find the index of the block about to follow us (note: we don't add
* one; blocks are 0-indexed so we get a fencepost problem) */
* now that we can allocate a block number for them */
ctx->after_block = create_empty_block(ctx);
- list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
- mir_foreach_instr_in_block(block, ins) {
+ mir_foreach_block_from(ctx, start_block, _block) {
+ mir_foreach_instr_in_block(((midgard_block *) _block), ins) {
if (ins->type != TAG_ALU_4) continue;
if (!ins->compact_branch) continue;
ins->branch.target_type = TARGET_GOTO;
ins->branch.target_block = break_block_idx;
- midgard_block_add_successor(block, ctx->after_block);
+ pan_block_add_successor(_block, &ctx->after_block->base);
}
}
{
midgard_block *initial_block = mir_get_block(ctx, block_idx);
- mir_foreach_block_from(ctx, initial_block, v) {
+ mir_foreach_block_from(ctx, initial_block, _v) {
+ midgard_block *v = (midgard_block *) _v;
if (v->quadword_count) {
midgard_bundle *initial_bundle =
util_dynarray_element(&v->bundles, midgard_bundle, 0);
if (!br) continue;
unsigned popped = br->branch.target_block;
- midgard_block_add_successor(mir_get_block(ctx, popped - 1), ctx->current_block);
+ pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base), &ctx->current_block->base);
br->branch.target_block = emit_fragment_epilogue(ctx, rt);
/* If we have more RTs, we'll need to restore back after our
midgard_instruction uncond = v_branch(false, false);
uncond.branch.target_block = popped;
emit_mir_instruction(ctx, uncond);
- midgard_block_add_successor(ctx->current_block, mir_get_block(ctx, popped));
+ pan_block_add_successor(&ctx->current_block->base, &(mir_get_block(ctx, popped)->base));
schedule_barrier(ctx);
} else {
/* We're last, so we can terminate here */
/* Per-block lowering before opts */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
inline_alu_constants(ctx, block);
midgard_opt_promote_fmov(ctx, block);
embedded_to_inline_constant(ctx, block);
do {
progress = false;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
progress |= midgard_opt_pos_propagate(ctx, block);
progress |= midgard_opt_copy_prop(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
}
} while (progress);
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
midgard_lower_invert(ctx, block);
midgard_lower_derivatives(ctx, block);
}
/* Nested control-flow can result in dead branches at the end of the
* block. This messes with our analysis and is just dead code, so cull
* them */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
midgard_opt_cull_dead_branch(ctx, block);
}
int br_block_idx = 0;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
for (int c = 0; c < bundle->instruction_count; ++c) {
midgard_instruction *ins = bundle->instructions[c];
/* Cache _all_ bundles in source order for lookahead across failed branches */
int bundle_count = 0;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
bundle_count += block->bundles.size / sizeof(midgard_bundle);
}
midgard_bundle **source_order_bundles = malloc(sizeof(midgard_bundle *) * bundle_count);
int bundle_idx = 0;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) {
source_order_bundles[bundle_idx++] = bundle;
}
* need to lookahead. Unless this is the last instruction, in
* which we return 1. */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_bundle_in_block(block, bundle) {
int lookahead = 1;
/* Count instructions and bundles */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
nr_bundles += util_dynarray_num_elements(
&block->bundles, midgard_bundle);
/* live_out[s] = sum { p in succ[s] } ( live_in[p] ) */
static void
-liveness_block_live_out(midgard_block *blk, unsigned temp_count)
+liveness_block_live_out(pan_block *blk, unsigned temp_count)
{
- mir_foreach_successor(blk, succ) {
+ pan_foreach_successor(blk, succ) {
for (unsigned i = 0; i < temp_count; ++i)
blk->live_out[i] |= succ->live_in[i];
}
* returns whether progress was made. */
static bool
-liveness_block_update(midgard_block *blk, unsigned temp_count)
+liveness_block_update(pan_block *blk, unsigned temp_count)
{
bool progress = false;
uint16_t *live = ralloc_array(blk, uint16_t, temp_count);
memcpy(live, blk->live_out, temp_count * sizeof(uint16_t));
- mir_foreach_instr_in_block_rev(blk, ins)
+ pan_foreach_instr_in_block_rev(blk, ins)
mir_liveness_ins_update(live, ins, temp_count);
/* To figure out progress, diff live_in */
do {
/* Pop off a block */
- midgard_block *blk = (struct midgard_block *) cur->key;
+ pan_block *blk = (struct pan_block *) cur->key;
_mesa_set_remove(work_list, cur);
/* Update its liveness information */
/* If we made progress, we need to process the predecessors */
if (progress || !_mesa_set_search(visited, blk)) {
- mir_foreach_predecessor(blk, pred)
+ pan_foreach_predecessor(blk, pred)
_mesa_set_add(work_list, pred);
}
/* Check whether we're live in the successors */
- if (liveness_get(block->live_out, src, ctx->temp_count))
+ if (liveness_get(block->base.live_out, src, ctx->temp_count))
return true;
/* Check the rest of the block for liveness */
mir_invalidate_liveness(ctx);
mir_compute_liveness(ctx);
- uint16_t *live = mem_dup(block->live_out, ctx->temp_count * sizeof(uint16_t));
+ uint16_t *live = mem_dup(block->base.live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(block, ins) {
if (can_cull_mask(ctx, ins)) {
void
mir_print_block(midgard_block *block)
{
- printf("block%u: {\n", block->name);
+ printf("block%u: {\n", block->base.name);
if (block->scheduled) {
mir_foreach_bundle_in_block(block, bundle) {
printf("}");
- if (block->nr_successors) {
+ if (block->base.nr_successors) {
printf(" -> ");
- for (unsigned i = 0; i < block->nr_successors; ++i) {
- printf("block%u%s", block->successors[i]->name,
- (i + 1) != block->nr_successors ? ", " : "");
+ for (unsigned i = 0; i < block->base.nr_successors; ++i) {
+ printf("block%u%s", block->base.successors[i]->name,
+ (i + 1) != block->base.nr_successors ? ", " : "");
}
}
printf(" from { ");
mir_foreach_predecessor(block, pred)
- printf("block%u ", pred->name);
+ printf("block%u ", pred->base.name);
printf("}");
printf("\n\n");
mir_print_shader(compiler_context *ctx)
{
mir_foreach_block(ctx, block) {
- mir_print_block(block);
+ mir_print_block((midgard_block *) block);
}
}
if (ctx->is_blend) {
unsigned r1w = ~0;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block_rev(block, ins) {
if (ins->writeout)
r1w = ins->src[2];
* interference by walking each block linearly. Take live_out at the
* end of each block and walk the block backwards. */
- mir_foreach_block(ctx, blk) {
- uint16_t *live = mem_dup(blk->live_out, ctx->temp_count * sizeof(uint16_t));
+ mir_foreach_block(ctx, _blk) {
+ midgard_block *blk = (midgard_block *) _blk;
+ uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(blk, ins) {
/* Mark all registers live after the instruction as
if (is_special_w)
spill_slot = spill_index++;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->dest != spill_node) continue;
* work registers to back special registers; TLS
* spilling is to use memory to back work registers) */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block(block, ins) {
/* We can't rewrite the moves used to spill in the
* first place. These moves are hinted. */
{
mir_invalidate_liveness(ctx);
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+
mir_foreach_bundle_in_block(block, bundle) {
if (!mir_is_alu_bundle(bundle)) continue;
if (bundle->instruction_count < 2) continue;
static midgard_instruction **
flatten_mir(midgard_block *block, unsigned *len)
{
- *len = list_length(&block->instructions);
+ *len = list_length(&block->base.instructions);
if (!(*len))
return NULL;
}
mir_foreach_instr_in_block_scheduled_rev(block, ins) {
- list_add(&ins->link, &block->instructions);
+ list_add(&ins->link, &block->base.instructions);
}
free(instructions); /* Allocated by flatten_mir() */
/* Lowering can introduce some dead moves */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
midgard_opt_dead_move_eliminate(ctx, block);
schedule_block(ctx, block);
}
unsigned max_live = 0;
- mir_foreach_block(ctx, block) {
- uint16_t *live = mem_dup(block->live_out, ctx->temp_count * sizeof(uint16_t));
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ uint16_t *live = mem_dup(block->base.live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(block, ins) {
unsigned count = mir_count_live(live, ctx->temp_count);