/* TODO: Check write mask, and possibly not clear everything. */
/* For any usage of our variable on the RHS, clear it out. */
- struct set_entry *set_entry;
set_foreach(entry->dsts, set_entry) {
ir_variable *dst_var = (ir_variable *)set_entry->key;
acp_entry *dst_entry = pull_acp(dst_var);
void remove_dead_variables()
{
- struct set_entry *entry;
-
set_foreach(variables, entry) {
ir_variable *ir = (ir_variable *) entry->key;
new_block->cf_node.parent = block->cf_node.parent;
exec_node_insert_node_before(&block->cf_node.node, &new_block->cf_node.node);
- struct set_entry *entry;
set_foreach(block->predecessors, entry) {
nir_block *pred = (nir_block *) entry->key;
replace_successor(pred, block, new_block);
block->imm_dom = NULL;
block->num_dom_children = 0;
- struct set_entry *entry;
set_foreach(block->dom_frontier, entry) {
_mesa_set_remove(block->dom_frontier, entry);
}
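/* Editor's note as a hedged aside: removing entries inside set_foreach, as the
 * loop above does, is safe with this set implementation, since _mesa_set_remove
 * only marks the entry as deleted and does not rehash; only insertion during
 * iteration is unsafe (see the comment on the set_foreach macro in util/set.h
 * near the end of this patch).
 */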
calc_dominance(nir_block *block)
{
nir_block *new_idom = NULL;
- struct set_entry *entry;
set_foreach(block->predecessors, entry) {
nir_block *pred = (nir_block *) entry->key;
calc_dom_frontier(nir_block *block)
{
if (block->predecessors->entries > 1) {
- struct set_entry *entry;
set_foreach(block->predecessors, entry) {
nir_block *runner = (nir_block *) entry->key;
{
nir_foreach_block(block, impl) {
fprintf(fp, "DF(%u) = {", block->index);
- struct set_entry *entry;
set_foreach(block->dom_frontier, entry) {
nir_block *df = (nir_block *) entry->key;
fprintf(fp, "%u, ", df->index);
if (block != def->parent_instr->block) {
/* Try to go up the single-successor tree */
bool all_single_successors = true;
- struct set_entry *entry;
set_foreach(block->predecessors, entry) {
nir_block *pred = (nir_block *)entry->key;
if (pred->successors[0] && pred->successors[1]) {
/* set_vertex_count intrinsics only appear in predecessors of the
* end block. So we don't need to walk all of them.
*/
- struct set_entry *entry;
set_foreach(function->impl->end_block->predecessors, entry) {
nir_block *block = (nir_block *) entry->key;
* changed, add the predecessor to the work list so that we ensure
* that the new information is used.
*/
- struct set_entry *entry;
set_foreach(block->predecessors, entry) {
nir_block *pred = (nir_block *)entry->key;
if (propagate_across_edge(pred, block, &state))
/* Insert the new intrinsic in all of the predecessors of the end block,
* but before any jump instructions (return).
*/
- struct set_entry *entry;
set_foreach(end_block->predecessors, entry) {
nir_block *pred = (nir_block *) entry->key;
b->cursor = nir_after_block_before_jump(pred);
/* For all other shader types, we need to do the copies right before
* the jumps to the end block.
*/
- struct set_entry *block_entry;
set_foreach(impl->end_block->predecessors, block_entry) {
struct nir_block *block = (void *)block_entry->key;
b.cursor = nir_after_block_before_jump(block);
nir_builder b;
nir_builder_init(&b, state->impl);
- struct set_entry *copy_entry;
set_foreach(node->copies, copy_entry) {
nir_intrinsic_instr *copy = (void *)copy_entry->key;
assert(node->path.path[0]->var->constant_initializer == NULL);
if (node->stores) {
- struct set_entry *store_entry;
set_foreach(node->stores, store_entry) {
nir_intrinsic_instr *store =
(nir_intrinsic_instr *)store_entry->key;
assert(header_block->predecessors->entries == 2);
- struct set_entry *pred_entry;
set_foreach(header_block->predecessors, pred_entry) {
if (pred_entry->key != prev_block)
return (nir_block*)pred_entry->key;
while (w_start != w_end) {
nir_block *cur = pb->W[w_start++];
- struct set_entry *dom_entry;
set_foreach(cur->dom_frontier, dom_entry) {
nir_block *next = (nir_block *) dom_entry->key;
* XXX: Calling qsort this many times seems expensive.
*/
int num_preds = 0;
- struct set_entry *entry;
set_foreach(phi->instr.block->predecessors, entry)
preds[num_preds++] = (nir_block *)entry->key;
qsort(preds, num_preds, sizeof(*preds), compare_blocks);
nir_block **preds =
malloc(block->predecessors->entries * sizeof(nir_block *));
- struct set_entry *entry;
unsigned i = 0;
set_foreach(block->predecessors, entry) {
preds[i++] = (nir_block *) entry->key;
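/* A minimal sketch (hypothetical helper, not part of this patch) of the
 * pattern above: the set's keys are copied into a flat array so they can be
 * sorted with qsort(), or walked while the structures behind the set are
 * being modified.  Uses only the "entries" count and entry->key fields seen
 * above; needs <stdlib.h> for malloc(), error handling omitted.
 */
static void **
snapshot_set_keys(struct set *ht, unsigned *count)
{
   void **keys = malloc(ht->entries * sizeof(void *));
   unsigned i = 0;
   set_foreach(ht, entry)
      keys[i++] = (void *)entry->key;
   *count = i;
   return keys;
}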
if (!var_usage || !var_usage->vars_copied)
continue;
- struct set_entry *copy_entry;
set_foreach(var_usage->vars_copied, copy_entry) {
struct vec_var_usage *copy_usage = (void *)copy_entry->key;
if (copy_usage->comps_kept != var_usage->comps_kept) {
/* Create a phi node with as many sources pointing to the same ssa_def as
* the block has predecessors.
*/
- struct set_entry *entry;
set_foreach(block_after_loop->predecessors, entry) {
nir_phi_src *phi_src = ralloc(phi, nir_phi_src);
phi_src->src = nir_src_for_ssa(def);
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
- struct set_entry *entry;
-
pipe_mutex_assert_locked(batch->ctx->screen->lock);
set_foreach(batch->resources, entry) {
for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
batch = cache->batches[i];
debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
- struct set_entry *entry;
set_foreach(batch->dependencies, entry) {
struct fd_batch *dep = (struct fd_batch *)entry->key;
debug_printf(" %d", dep->idx);
{
struct ir3_block *block;
struct hash_entry *hentry;
- struct set_entry *sentry;
unsigned i;
hentry = _mesa_hash_table_search(ctx->block_ht, nblock);
v3d_simulator_pin_bos(int fd, struct v3d_job *job)
{
struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
- struct set_entry *entry;
set_foreach(job->bos, entry) {
struct v3d_bo *bo = (struct v3d_bo *)entry->key;
v3d_simulator_unpin_bos(int fd, struct v3d_job *job)
{
struct v3d_simulator_file *file = v3d_get_simulator_file_for_fd(fd);
- struct set_entry *entry;
set_foreach(job->bos, entry) {
struct v3d_bo *bo = (struct v3d_bo *)entry->key;
const unsigned num_end_preds = impl->end_block->predecessors->entries;
nir_block *end_preds[num_end_preds];
unsigned i = 0;
- struct set_entry *entry;
set_foreach(impl->end_block->predecessors, entry) {
end_preds[i++] = (nir_block *) entry->key;
list->array_length * sizeof(*list->relocs));
memcpy(list->reloc_bos, other_list->reloc_bos,
list->array_length * sizeof(*list->reloc_bos));
- struct set_entry *entry;
set_foreach(other_list->deps, entry) {
_mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
}
list->num_relocs += other->num_relocs;
- struct set_entry *entry;
set_foreach(other->deps, entry) {
_mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
}
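/* A small sketch (hypothetical helper, not from this patch) of the merge
 * pattern used in the two hunks above: every key of one set is added to
 * another while reusing the hash already stored in the source entry, so it
 * is not recomputed.
 */
static void
merge_set_into(struct set *dst, struct set *src)
{
   set_foreach(src, entry)
      _mesa_set_add_pre_hashed(dst, entry->hash, entry->key);
}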
if (bos == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- struct set_entry *entry;
struct anv_bo **bo = bos;
set_foreach(relocs->deps, entry) {
*bo++ = (void *)entry->key;
_mesa_reference_buffer_object(ctx, &shared->NullBufferObj, NULL);
if (shared->SyncObjects) {
- struct set_entry *entry;
set_foreach(shared->SyncObjects, entry) {
_mesa_unref_sync_object(ctx, (struct gl_sync_object *) entry->key, 1);
}
return;
if (delete_function) {
- struct set_entry *entry;
-
set_foreach (ht, entry) {
delete_function(entry);
}
void
_mesa_set_clear(struct set *set, void (*delete_function)(struct set_entry *entry))
{
- struct set_entry *entry;
-
if (!set)
return;
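/* A hypothetical usage sketch (not part of this patch): clearing a set whose
 * keys own heap memory, letting the delete callback free each key before the
 * entry is dropped.  Assumes the keys were allocated with malloc()
 * (<stdlib.h>).
 */
static void
free_key(struct set_entry *entry)
{
   free((void *)entry->key);
}

static void
clear_owned_keys(struct set *ht)
{
   _mesa_set_clear(ht, free_key);
}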
set_rehash(struct set *ht, unsigned new_size_index)
{
struct set old_ht;
- struct set_entry *table, *entry;
+ struct set_entry *table;
if (new_size_index >= ARRAY_SIZE(hash_sizes))
return;
* insertion (which may rehash the set, making entry a dangling
* pointer).
*/
-#define set_foreach(set, entry) \
- for (entry = _mesa_set_next_entry(set, NULL); \
- entry != NULL; \
+#define set_foreach(set, entry) \
+ for (struct set_entry *entry = _mesa_set_next_entry(set, NULL); \
+ entry != NULL; \
entry = _mesa_set_next_entry(set, entry))
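/* Caller-side illustration (hypothetical helper, not part of this patch) of
 * the idiom change above: set_foreach now declares the C99 loop variable
 * itself, so the separate "struct set_entry *entry;" declarations removed
 * throughout this patch are no longer needed, and "entry" is scoped to the
 * loop body.
 */
static struct set_entry *
find_entry_by_pointer(struct set *ht, const void *key)
{
   set_foreach(ht, entry) {
      if (entry->key == key)
         return entry;
   }
   return NULL;
}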
#ifdef __cplusplus