dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
/* by definition, should come before: */
- debug_assert(instr_before(dd, d));
+ ra_assert(ctx, instr_before(dd, d));
*sz = MAX2(*sz, dsz);
d = dd;
}
- debug_assert(d->opc != OPC_META_SPLIT);
+ ra_assert(ctx, d->opc != OPC_META_SPLIT);
id->defn = d;
id->sz = *sz;
return pick_in_range(regs, base, base + max_target);
}
} else {
- assert(sz == 1);
+ ra_assert(ctx, sz == 1);
}
/* NOTE: this is only used in scalar pass, so the register
static struct ir3_instruction *
name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
{
- assert(!name_is_array(ctx, name));
+ ra_assert(ctx, !name_is_array(ctx, name));
struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
if (entry)
return entry->data;
- unreachable("invalid instr name");
+ ra_unreachable(ctx, "invalid instr name");
return NULL;
}
static struct ir3_array *
name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
{
- assert(name_is_array(ctx, name));
+ ra_assert(ctx, name_is_array(ctx, name));
foreach_array (arr, &ctx->ir->array_list) {
unsigned sz = reg_size_for_array(arr);
if (name < (arr->base + sz))
return arr;
}
- unreachable("invalid array name");
+ ra_unreachable(ctx, "invalid array name");
return NULL;
}
__def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
struct ir3_instruction *instr)
{
- debug_assert(name < ctx->alloc_count);
+ ra_assert(ctx, name < ctx->alloc_count);
/* split/collect do not actually define any real value */
if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
__use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
struct ir3_instruction *instr)
{
- debug_assert(name < ctx->alloc_count);
+ ra_assert(ctx, name < ctx->alloc_count);
ctx->use[name] = MAX2(ctx->use[name], instr->ip);
if (!BITSET_TEST(bd->def, name))
BITSET_SET(bd->use, name);
*/
unsigned *key = ralloc(ctx->name_to_instr, unsigned);
*key = name;
- debug_assert(!_mesa_hash_table_search(ctx->name_to_instr, key));
+ ra_assert(ctx, !_mesa_hash_table_search(ctx->name_to_instr, key));
_mesa_hash_table_insert(ctx->name_to_instr, key, instr);
}
}
struct ir3_ra_block_data *bd = block->data;
unsigned name;
- assert(ctx->name_to_instr);
+ ra_assert(ctx, ctx->name_to_instr);
/* TODO this gets a bit more complicated in non-scalar pass.. but
* possibly a lowball estimate is fine to start with if we do
* round-robin in non-scalar pass? Maybe we just want to handle
* that in a different fxn?
*/
- assert(ctx->scalar_pass);
+ ra_assert(ctx, ctx->scalar_pass);
BITSET_WORD *live =
rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));
cur_live += new_live;
cur_live -= new_dead;
- assert(cur_live >= 0);
+ ra_assert(ctx, cur_live >= 0);
d("CUR_LIVE: %u", cur_live);
max = MAX2(max, cur_live);
* live)
*/
cur_live -= next_dead;
- assert(cur_live >= 0);
+ ra_assert(ctx, cur_live >= 0);
if (RA_DEBUG) {
unsigned cnt = 0;
BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
cnt += name_size(ctx, name);
}
- assert(cur_live == cnt);
+ ra_assert(ctx, cur_live == cnt);
}
}
* tells us a value is livein. But not used by the block or
* liveout for the block. Possibly a bug in the liverange
* extension. But for now leave the assert disabled:
- assert(cur_live == liveout);
+ ra_assert(ctx, cur_live == liveout);
*/
}
}
*/
if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
unsigned n = ffs(id->defn->regs[0]->wrmask);
- debug_assert(n > 0);
+ ra_assert(ctx, n > 0);
first_component = n - 1;
}
unsigned r = ra_get_node_reg(ctx->g, name);
unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
- debug_assert(!(reg->flags & IR3_REG_RELATIV));
- debug_assert(num >= first_component);
+ ra_assert(ctx, !(reg->flags & IR3_REG_RELATIV));
+ ra_assert(ctx, num >= first_component);
if (is_high(id->defn))
num += FIRST_HIGH_REG;
struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
- debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
+ ra_assert(ctx, !(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
/* 'base' is in scalar (class 0) but we need to map that
* the conflicting register of the appropriate class (ie.
* .. and so on..
*/
unsigned regid = instr->regs[0]->num;
- assert(regid >= id->off);
+ ra_assert(ctx, regid >= id->off);
regid -= id->off;
unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
static void
ra_precolor_assigned(struct ir3_ra_ctx *ctx)
{
- debug_assert(ctx->scalar_pass);
+ ra_assert(ctx, ctx->scalar_pass);
foreach_block (block, &ctx->ir->block_list) {
foreach_instr (instr, &block->instr_list) {
};
int ret;
+ ret = setjmp(ctx.jmp_env);
+ if (ret)
+ goto fail;
+
ra_init(&ctx);
ra_add_interference(&ctx);
ra_precolor(&ctx, precolor, nprecolor);
if (scalar_pass)
ra_precolor_assigned(&ctx);
ret = ra_alloc(&ctx);
+
+fail:
ra_destroy(&ctx);
return ret;
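
The hunks above replace hard assert()/debug_assert()/unreachable() calls with ctx-aware ra_assert()/ra_unreachable() checks, and the final hunk wraps the RA passes in a setjmp() so a failed check can unwind to the fail: label and surface as an error return instead of aborting the process. The macro definitions themselves are not part of this excerpt; below is a minimal sketch of how they could look, assuming jmp_env is a jmp_buf field of ir3_ra_ctx (as the setjmp(ctx.jmp_env) call suggests) and that -1 is used as the error value. The message formatting and error value here are assumptions for illustration, not the upstream definitions.

/* Sketch only: assumes ir3_ra_ctx has a `jmp_buf jmp_env` member and that
 * the top-level RA entry point does `ret = setjmp(ctx.jmp_env)` as shown
 * in the last hunk of the diff.
 */
#include <setjmp.h>
#include <stdio.h>

#define ra_assert(ctx, expr) do {                                        \
      if (!(expr)) {                                                     \
         fprintf(stderr, "RA: %s:%d: assertion `%s' failed\n",           \
                 __FILE__, __LINE__, #expr);                             \
         /* unwind to the setjmp() in the RA entry point (fail: path) */ \
         longjmp((ctx)->jmp_env, -1);                                    \
      }                                                                  \
   } while (0)

#define ra_unreachable(ctx, str) do {                                    \
      fprintf(stderr, "RA: %s:%d: unreachable: %s\n",                    \
              __FILE__, __LINE__, (str));                                \
      longjmp((ctx)->jmp_env, -1);                                       \
   } while (0)

Because setjmp() returns the value passed to longjmp(), ret becomes nonzero on the failure path, so ra_destroy() still runs and the function returns an error the caller can handle (presumably by reporting the failure or retrying allocation) rather than crashing mid-compile.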