So many open-coded list iterators were getting annoying. Add
foreach_instr/foreach_instr_rev/foreach_instr_safe,
foreach_block/foreach_block_safe, and foreach_array wrappers around
list_for_each_entry and convert the existing open-coded loops to use them.
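
As a quick illustration of the shape of the conversion (sketch only, loop
bodies elided; block and instr_list are the existing ir3 structures touched
by this patch):

    /* before: element type and link member spelled out at every loop */
    list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
            /* ... visit instr ... */
    }

    /* after: the macro supplies struct ir3_instruction and the 'node' member */
    foreach_instr (instr, &block->instr_list) {
            /* ... visit instr ... */
    }
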
Signed-off-by: Rob Clark <robdclark@chromium.org>
info->sizedwords = 0;
info->ss = info->sy = 0;
- list_for_each_entry (struct ir3_block, block, &shader->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &shader->block_list) {
+ foreach_instr (instr, &block->instr_list) {
info->sizedwords += 2;
}
}
ptr = dwords = calloc(4, info->sizedwords);
- list_for_each_entry (struct ir3_block, block, &shader->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &shader->block_list) {
+ foreach_instr (instr, &block->instr_list) {
int ret = emit[opc_cat(instr->opc)](instr, dwords, info);
if (ret)
goto fail;
void
ir3_block_clear_mark(struct ir3_block *block)
{
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
+ foreach_instr (instr, &block->instr_list)
instr->flags &= ~IR3_INSTR_MARK;
}
void
ir3_clear_mark(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
ir3_block_clear_mark(block);
}
}
ir3_count_instructions(struct ir3 *ir)
{
unsigned cnt = 0;
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
block->start_ip = cnt;
block->end_ip = cnt;
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
instr->ip = cnt++;
block->end_ip = instr->ip;
}
struct ir3_array *
ir3_lookup_array(struct ir3 *ir, unsigned id)
{
- list_for_each_entry (struct ir3_array, arr, &ir->array_list, node)
+ foreach_array (arr, &ir->array_list)
if (arr->id == id)
return arr;
return NULL;
#define foreach_output(__outinstr, __ir) \
foreach_output_n(__outinstr, __i, __ir)
+/* iterators for instructions: */
+#define foreach_instr(__instr, __list) \
+ list_for_each_entry(struct ir3_instruction, __instr, __list, node)
+#define foreach_instr_rev(__instr, __list) \
+ list_for_each_entry_rev(struct ir3_instruction, __instr, __list, node)
+#define foreach_instr_safe(__instr, __list) \
+ list_for_each_entry_safe(struct ir3_instruction, __instr, __list, node)
+
+/* iterators for blocks: */
+#define foreach_block(__block, __list) \
+ list_for_each_entry(struct ir3_block, __block, __list, node)
+#define foreach_block_safe(__block, __list) \
+ list_for_each_entry_safe(struct ir3_block, __block, __list, node)
+
+/* iterators for arrays: */
+#define foreach_array(__array, __list) \
+ list_for_each_entry(struct ir3_array, __array, __list, node)
+
/* dump: */
void ir3_print(struct ir3 *ir);
void ir3_print_instr(struct ir3_instruction *instr);
if (so->image_mapping.num_ibo == 0)
return;
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
instr->data = NULL;
}
}
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr_safe (instr, &block->instr_list) {
struct ir3_register *reg;
foreach_src(reg, instr) {
* First Step: scan shader to find which bary.f/ldlv remain:
*/
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
if (is_input(instr)) {
unsigned inloc = instr->regs[1]->iim_val;
unsigned i = inloc / 4;
* Third Step: reassign packed inloc's:
*/
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
if (is_input(instr)) {
unsigned inloc = instr->regs[1]->iim_val;
unsigned i = inloc / 4;
unsigned idx = 0;
/* Collect sampling instructions eligible for pre-dispatch. */
- list_for_each_entry(struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry_safe(struct ir3_instruction, instr,
- &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr_safe (instr, &block->instr_list) {
if (instr->opc == OPC_META_TEX_PREFETCH) {
assert(idx < ARRAY_SIZE(ctx->so->sampler_prefetch));
struct ir3_sampler_prefetch *fetch =
*/
if (so->type == MESA_SHADER_TESS_CTRL ||
so->type == MESA_SHADER_GEOMETRY ) {
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
instr->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
break;
}
struct ir3_array *
ir3_get_array(struct ir3_context *ctx, nir_register *reg)
{
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ foreach_array (arr, &ctx->ir->array_list) {
if (arr->r == reg)
return arr;
}
* a mov, so we need to do a pass to first count consumers of a
* mov.
*/
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
struct ir3_instruction *src;
/* by the way, we don't account for false-dep's, so the CP
ir->outputs[n] = eliminate_output_mov(out);
}
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
if (block->condition) {
instr_cp(&ctx, block->condition);
block->condition = eliminate_output_mov(block->condition);
list_delinit(&instr->node);
/* find where to re-insert instruction: */
- list_for_each_entry (struct ir3_instruction, pos, list, node) {
+ foreach_instr (pos, list) {
if (pos->depth > instr->depth) {
list_add(&instr->node, &pos->node);
return;
remove_unused_by_block(struct ir3_block *block)
{
bool progress = false;
- list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr_safe (instr, &block->instr_list) {
if (instr->opc == OPC_END || instr->opc == OPC_CHSH || instr->opc == OPC_CHMASK)
continue;
if (instr->flags & IR3_INSTR_UNUSED) {
/* initially mark everything as unused, we'll clear the flag as we
* visit the instructions:
*/
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
/* special case, if pre-fs texture fetch used, we cannot
* eliminate the barycentric i/j input
*/
foreach_output(out, ir)
ir3_instr_depth(out, 0, false);
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
for (i = 0; i < block->keeps_count; i++)
ir3_instr_depth(block->keeps[i], 0, false);
}
/* mark un-used instructions: */
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
progress |= remove_unused_by_block(block);
}
foreach_output(out, ir)
instr_find_neighbors(out);
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
for (i = 0; i < block->keeps_count; i++) {
struct ir3_instruction *instr = block->keeps[i];
instr_find_neighbors(instr);
list_replace(&block->instr_list, &instr_list);
list_inithead(&block->instr_list);
- list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
+ foreach_instr_safe (n, &instr_list) {
struct ir3_register *reg;
unsigned i;
static bool
resolve_jumps(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
+ foreach_block (block, &ir->block_list)
+ foreach_instr (instr, &block->instr_list)
if (is_flow(instr) && instr->cat0.target)
if (resolve_jump(instr))
return true;
static void
mark_xvergence_points(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
if (block->predecessors->entries > 1) {
/* if a block has more than one possible predecessor, then
* the first instruction is a convergence point.
ctx->type = ir->type;
/* allocate per-block data: */
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
block->data = rzalloc(ctx, struct ir3_legalize_block_data);
}
/* process each block: */
do {
progress = false;
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
progress |= legalize_block(ctx, block);
}
} while (progress);
printf("\n");
}
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
print_instr(instr, lvl+1);
}
void
ir3_print(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
+ foreach_block (block, &ir->block_list)
print_block(block, 0);
struct ir3_instruction *out;
static void
ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
if (instr->regs_count == 0)
continue;
static void
ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
#ifdef DEBUG
ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
ra_block_find_definers(ctx, block);
}
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
ra_block_name_instructions(ctx, block);
}
/* and vreg names for array elements: */
base = ctx->class_base[total_class_count];
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ foreach_array (arr, &ctx->ir->array_list) {
arr->base = base;
ctx->class_alloc_count[total_class_count] += arr->length;
base += arr->length;
block->data = bd;
struct ir3_instruction *first_non_input = NULL;
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
if (instr->opc != OPC_META_INPUT) {
first_non_input = instr;
break;
}
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
struct ir3_instruction *src;
struct ir3_register *reg;
unsigned bitset_words = BITSET_WORDS(ctx->alloc_count);
bool progress = false;
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
struct ir3_ra_block_data *bd = block->data;
/* update livein: */
struct ir3 *ir = ctx->ir;
/* initialize array live ranges: */
- list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
+ foreach_array (arr, &ir->array_list) {
arr->start_ip = ~0;
arr->end_ip = 0;
}
* block's def/use bitmasks (used below to calculate per-block
* livein/liveout):
*/
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
ra_block_compute_live_ranges(ctx, block);
}
if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
debug_printf("AFTER LIVEIN/OUT:\n");
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
struct ir3_ra_block_data *bd = block->data;
debug_printf("block%u:\n", block_id(block));
print_bitset(" def", bd->def, ctx->alloc_count);
print_bitset(" l/i", bd->livein, ctx->alloc_count);
print_bitset(" l/o", bd->liveout, ctx->alloc_count);
}
- list_for_each_entry (struct ir3_array, arr, &ir->array_list, node) {
+ foreach_array (arr, &ir->array_list) {
debug_printf("array%u:\n", arr->id);
debug_printf(" length: %u\n", arr->length);
debug_printf(" start_ip: %u\n", arr->start_ip);
}
/* extend start/end ranges based on livein/liveout info from cfg: */
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
struct ir3_ra_block_data *bd = block->data;
for (unsigned i = 0; i < ctx->alloc_count; i++) {
}
}
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ foreach_array (arr, &ctx->ir->array_list) {
for (unsigned i = 0; i < arr->length; i++) {
if (BITSET_TEST(bd->livein, i + arr->base)) {
arr->start_ip = MIN2(arr->start_ip, block->start_ip);
static void
ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
{
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
struct ir3_register *reg;
if (writes_gpr(instr)) {
/* pre-assign array elements:
*/
- list_for_each_entry (struct ir3_array, arr, &ctx->ir->array_list, node) {
+ foreach_array (arr, &ctx->ir->array_list) {
unsigned base = 0;
if (arr->end_ip == 0)
* been assigned:
*/
retry:
- list_for_each_entry (struct ir3_array, arr2, &ctx->ir->array_list, node) {
+ foreach_array (arr2, &ctx->ir->array_list) {
if (arr2 == arr)
break;
if (arr2->end_ip == 0)
if (!ra_allocate(ctx->g))
return -1;
- list_for_each_entry (struct ir3_block, block, &ctx->ir->block_list, node) {
+ foreach_block (block, &ctx->ir->block_list) {
ra_block_alloc(ctx, block);
}
static void
update_use_count(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
instr->use_count = 0;
}
}
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_block (block, &ir->block_list) {
+ foreach_instr (instr, &block->instr_list) {
if ((instr->opc == OPC_META_COLLECT) || (instr->opc == OPC_META_SPLIT))
continue;
static void
clear_cache(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
- list_for_each_entry (struct ir3_instruction, instr2, &ctx->depth_list, node) {
+ foreach_instr (instr2, &ctx->depth_list) {
if ((instr2->data == instr) || (instr2->data == NULL_INSTR) || !instr)
instr2->data = NULL;
}
{
unsigned d = 0;
- list_for_each_entry_rev (struct ir3_instruction, n, &block->instr_list, node) {
+ foreach_instr_rev (n, &block->instr_list) {
if ((n == instr) || (d >= maxd))
return d;
/* NOTE: don't count branch/jump since we don't know yet if they will
* get traversed both when they appear as ssa src to a later instruction
* as well as where they appear in the depth_list.
*/
- list_for_each_entry_rev (struct ir3_instruction, instr, &ctx->depth_list, node) {
+ foreach_instr_rev (instr, &ctx->depth_list) {
struct ir3_instruction *candidate;
candidate = find_instr_recursive(ctx, notes, instr);
/* traverse the list a second time.. but since we cache the result of
* find_instr_recursive() it isn't as bad as it looks.
*/
- list_for_each_entry_rev (struct ir3_instruction, instr, &ctx->depth_list, node) {
+ foreach_instr_rev (instr, &ctx->depth_list) {
struct ir3_instruction *candidate;
candidate = find_instr_recursive(ctx, notes, instr);
* Finally, move all the remaining instructions to the depth-
* list
*/
- list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
+ foreach_instr_safe (instr, &unscheduled_list)
if (instr->opc == OPC_META_INPUT)
schedule(ctx, instr);
- list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
+ foreach_instr_safe (instr, &unscheduled_list)
if (instr->opc == OPC_META_TEX_PREFETCH)
schedule(ctx, instr);
- list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
+ foreach_instr_safe (instr, &unscheduled_list)
ir3_insert_by_depth(instr, &ctx->depth_list);
while (!list_is_empty(&ctx->depth_list)) {
ctx->block = block;
- list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr_safe (instr, &block->instr_list) {
unsigned delay = 0;
set_foreach(block->predecessors, entry) {
ir3_clear_mark(ir);
update_use_count(ir);
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
ctx.live_values = 0;
sched_block(&ctx, block);
}
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
sched_intra_block(&ctx, block);
}
static void
calculate_deps(struct ir3_block *block)
{
- list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
+ foreach_instr (instr, &block->instr_list) {
if (instr->barrier_class) {
add_barrier_deps(block, instr);
}
void
ir3_sched_add_deps(struct ir3 *ir)
{
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
calculate_deps(block);
}
}
foreach_output(out, ir)
max = MAX2(max, number_instr(out));
- list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
+ foreach_block (block, &ir->block_list) {
for (unsigned i = 0; i < block->keeps_count; i++)
max = MAX2(max, number_instr(block->keeps[i]));
if (block->condition)