unsigned index = 0;
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
}
unsigned index = 0;
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
instr->index = index++;
}
return exec_node_data(nir_instr, tail, node);
}
-#define nir_foreach_instr(block, instr) \
+#define nir_foreach_instr(instr, block) \
foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_reverse(block, instr) \
+#define nir_foreach_instr_reverse(instr, block) \
foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_safe(block, instr) \
+#define nir_foreach_instr_safe(instr, block) \
foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
-#define nir_foreach_instr_reverse_safe(block, instr) \
+#define nir_foreach_instr_reverse_safe(instr, block) \
foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
typedef struct nir_if {
nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
assert(block->cf_node.type == nir_cf_node_block);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
return nir_before_instr(instr);
}
{
bool progress = false;
- nir_foreach_instr_reverse_safe(block, instr) {
+ nir_foreach_instr_reverse_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
/* We need this for phi sources */
add_remap(state, nblk, blk);
- nir_foreach_instr(blk, instr) {
+ nir_foreach_instr(instr, blk) {
if (instr->type == nir_instr_type_phi) {
/* Phi instructions are a bit of a special case when cloning because
* we don't want inserting the instruction to automatically handle
 sources will be messed up. This will reverse the order of the phi's, but
* order shouldn't matter.
*/
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
static void
rewrite_phi_preds(nir_block *block, nir_block *old_pred, nir_block *new_pred)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
insert_phi_undef(nir_block *block, nir_block *pred)
{
nir_function_impl *impl = nir_cf_node_get_function(&block->cf_node);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
assert(instr->type != nir_instr_type_phi);
nir_block *new_block = split_block_beginning(instr->block);
- nir_foreach_instr_safe(instr->block, cur_instr) {
+ nir_foreach_instr_safe(cur_instr, instr->block) {
if (cur_instr == instr)
break;
static void
remove_phi_src(nir_block *block, nir_block *pred)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
case nir_cf_node_block: {
nir_block *block = nir_cf_node_as_block(node);
/* We need to walk the instructions and clean up defs/uses */
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_jump) {
nir_jump_type jump_type = nir_instr_as_jump(instr)->type;
unlink_jump(block, jump_type, false);
isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
nir_instr *last_phi_instr = NULL;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
nir_parallel_copy_instr_create(dead_ctx);
nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
static bool
coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
break;
aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
nir_parallel_copy_instr *start_pcopy = NULL;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi) {
if (instr->type != nir_instr_type_parallel_copy)
static bool
resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
state->instr = instr;
nir_foreach_ssa_def(instr, rewrite_ssa_def, state);
static bool
gather_info_block(nir_block *block, void *shader)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
switch (instr->type) {
case nir_instr_type_intrinsic:
gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader);
set_foreach(function->impl->end_block->predecessors, entry) {
nir_block *block = (nir_block *) entry->key;
- nir_foreach_instr_reverse(block, instr) {
+ nir_foreach_instr_reverse(instr, block) {
nir_intrinsic_instr *intrin = as_set_vertex_count(instr);
if (!intrin)
continue;
static bool
rewrite_param_derefs_block(nir_block *block, nir_call_instr *call)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
lower_params_to_locals_block(nir_block *block, nir_function_impl *impl)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
* properly get moved to the next block when it gets split, and we
* continue iterating there.
*/
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_call)
continue;
NIR_VLA(BITSET_WORD, live, state->bitset_words);
memcpy(live, succ->live_in, state->bitset_words * sizeof *live);
- nir_foreach_instr(succ, instr) {
+ nir_foreach_instr(instr, succ) {
if (instr->type != nir_instr_type_phi)
break;
nir_phi_instr *phi = nir_instr_as_phi(instr);
set_ssa_def_dead(&phi->dest.ssa, live);
}
- nir_foreach_instr(succ, instr) {
+ nir_foreach_instr(instr, succ) {
if (instr->type != nir_instr_type_phi)
break;
nir_phi_instr *phi = nir_instr_as_phi(instr);
*/
state.num_ssa_defs = 1;
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
nir_foreach_ssa_def(instr, index_ssa_def, &state);
}
if (following_if)
set_src_live(&following_if->condition, block->live_in);
- nir_foreach_instr_reverse(block, instr) {
+ nir_foreach_instr_reverse(instr, block) {
 /* Phi nodes are handled separately so we want to skip them. Since
* we are going backwards and they are at the beginning, we can just
* break as soon as we see one.
nir_builder_init(&builder, impl);
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_alu)
lower_alu_instr_scalar(nir_instr_as_alu(instr), &builder);
}
nir_foreach_function(shader, function) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_intrinsic)
lower_instr(nir_instr_as_intrinsic(instr),
shader_program, shader);
static nir_ssa_def *
find_output_in_block(nir_block *block, unsigned drvloc)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
{
nir_lower_doubles_options options = *((nir_lower_doubles_options *) ctx);
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
{
nir_builder *b = (nir_builder *) ctx;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
mark_global_var_uses_block(nir_block *block, nir_function_impl *impl,
struct hash_table *var_func_table)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
rewrite_intrinsics(nir_block *block, struct state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_builder_init(&b, impl);
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_alu)
convert_instr(&b, nir_instr_as_alu(instr));
}
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
{
nir_builder *b = &state->builder;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_lower_load_const_to_scalar_impl(nir_function_impl *impl)
{
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_load_const)
lower_load_const_instr_scalar(nir_instr_as_load_const(instr));
}
lower_locals_to_regs_block(nir_block *block,
struct locals_to_regs_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
emit_output_copies_block(nir_block *block, void *state)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
{
/* Find the last phi node in the block */
nir_phi_instr *last_phi = NULL;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
/* We have to handle the phi nodes in their own pass due to the way
* we're modifying the linked list of instructions.
*/
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
nir_builder_init(&b, impl);
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_tex)
lower_sampler(nir_instr_as_tex(instr), shader_program, stage, &b);
}
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
const nir_lower_tex_options *options = state->options;
nir_builder *b = &state->b;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_tex)
continue;
static bool
nir_lower_to_source_mods_block(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
lower_2side_state *state = void_state;
nir_builder *b = &state->b;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
void *mem_ctx = ralloc_parent(impl);
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
register_variable_uses_block(nir_block *block,
struct lower_variables_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_builder b;
nir_builder_init(&b, state->impl);
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
bool progress = false;
nir_shader *shader = impl->function->shader;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
static bool
move_vec_src_uses_to_dest_block(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
{
bool progress = false;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_tex)
continue;
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_alu:
progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
bool progress = false;
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (copy_prop_instr(instr))
progress = true;
}
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (nir_instr_set_add_or_rewrite(instr_set, instr)) {
progress = true;
nir_instr_remove(instr);
progress |= cse_block(child, instr_set);
}
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
nir_instr_set_remove(instr_set, instr);
return progress;
static bool
init_block(nir_block *block, struct exec_list *worklist)
{
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
init_instr(instr, worklist);
nir_if *following_if = nir_block_get_following_if(block);
bool progress = false;
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (!instr->pass_flags) {
nir_instr_remove(instr);
progress = true;
nir_cf_node_as_block(condition ? nir_if_last_then_node(if_stmt)
: nir_if_last_else_node(if_stmt));
- nir_foreach_instr_safe(after, instr) {
+ nir_foreach_instr_safe(instr, after) {
if (instr->type != nir_instr_type_phi)
break;
cf_node_has_side_effects(nir_cf_node *node)
{
nir_foreach_block_in_cf_node(block, node) {
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_call)
return true;
nir_metadata_dominance);
for (nir_block *cur = after->imm_dom; cur != before; cur = cur->imm_dom) {
- nir_foreach_instr(cur, instr) {
+ nir_foreach_instr(instr, cur) {
if (!nir_foreach_ssa_def(instr, def_not_live_out, after))
return false;
}
static bool
gcm_pin_instructions_block(nir_block *block, struct gcm_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_alu:
switch (nir_instr_as_alu(instr)->op) {
static bool
block_check_for_allowed_instrs(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
switch (instr->type) {
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
* block before. We have already guaranteed that this is safe by
* calling block_check_for_allowed_instrs()
*/
- nir_foreach_instr_safe(then_block, instr) {
+ nir_foreach_instr_safe(instr, then_block) {
exec_node_remove(&instr->node);
instr->block = prev_block;
exec_list_push_tail(&prev_block->instr_list, &instr->node);
}
- nir_foreach_instr_safe(else_block, instr) {
+ nir_foreach_instr_safe(instr, else_block) {
exec_node_remove(&instr->node);
instr->block = prev_block;
exec_list_push_tail(&prev_block->instr_list, &instr->node);
}
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
nir_foreach_function(shader, function) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_alu)
if (opt_undef_alu(nir_instr_as_alu(instr)))
progress = true;
free(preds);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
print_instr(instr, state, tabs);
fprintf(fp, "\n");
}
nir_foreach_function(shader, function) {
if (function->impl) {
nir_foreach_block(block, function->impl) {
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
switch(instr->type) {
case nir_instr_type_intrinsic:
add_var_use_intrinsic(nir_instr_as_intrinsic(instr), live);
nir_metadata_dominance);
nir_foreach_block(block, impl) {
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
nir_foreach_ssa_def(instr, repair_ssa_def, &state);
}
}
static bool
split_var_copies_block(nir_block *block, struct split_var_copies_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
{
ralloc_steal(nir, block);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
ralloc_steal(nir, instr);
nir_foreach_src(instr, sweep_src_indirect, nir);
static void
rewrite_phi_sources(nir_block *block, nir_block *pred, rewrite_state *state)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
* what we want because those instructions (vector gather, conditional
* select) will already be in SSA form.
*/
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
rewrite_instr_forward(instr, state);
}
for (unsigned i = 0; i < block->num_dom_children; i++)
rewrite_block(block->dom_children[i], state);
- nir_foreach_instr_reverse(block, instr) {
+ nir_foreach_instr_reverse(instr, block) {
rewrite_instr_backwards(instr, state);
}
}
static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
- nir_foreach_instr(succ, instr) {
+ nir_foreach_instr(instr, succ) {
if (instr->type != nir_instr_type_phi)
break;
state->block = block;
exec_list_validate(&block->instr_list);
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type == nir_instr_type_phi) {
assert(instr == nir_block_first_instr(block) ||
nir_instr_prev(instr)->type == nir_instr_type_phi);
}
nir_foreach_block(block, impl) {
- nir_foreach_instr(block, instr)
+ nir_foreach_instr(instr, block)
nir_foreach_ssa_def(instr, postvalidate_ssa_def, state);
}
}
_mesa_hash_table_destroy(ctx->addr_ht, NULL);
ctx->addr_ht = NULL;
- nir_foreach_instr(nblock, instr) {
+ nir_foreach_instr(instr, nblock) {
emit_instr(ctx, instr);
if (ctx->error)
return;
static bool
block_check_for_allowed_instrs(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
switch (instr->type) {
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
flatten_block(nir_builder *bld, nir_block *if_block, nir_block *prev_block,
nir_ssa_def *condition, bool invert)
{
- nir_foreach_instr_safe(if_block, instr) {
+ nir_foreach_instr_safe(instr, if_block) {
if (instr->type == nir_instr_type_intrinsic) {
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
if ((intr->intrinsic == nir_intrinsic_discard) ||
flatten_block(&state->b, else_block, prev_block,
if_stmt->condition.ssa, true);
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_phi)
break;
{
struct vc4_compile *c = state;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
nir_builder b;
nir_builder_init(&b, impl);
- nir_foreach_instr_safe(block, instr)
+ nir_foreach_instr_safe(instr, block)
vc4_nir_lower_io_instr(c, &b, instr);
return true;
nir_builder b;
nir_builder_init(&b, impl);
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type == nir_instr_type_tex) {
vc4_nir_lower_txf_ms_instr(c, &b,
nir_instr_as_tex(instr));
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
ntq_emit_instr(c, instr);
}
}
count_nir_instrs_in_block(nir_block *block, void *state)
{
int *count = (int *) state;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
*count = *count + 1;
}
return true;
{
struct anv_descriptor_set_layout *set_layout;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
get_used_bindings_block(nir_block *block,
struct apply_pipeline_layout_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
apply_pipeline_layout_block(nir_block *block,
struct apply_pipeline_layout_state *state)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_intrinsic: {
nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
continue;
nir_foreach_block(block, function->impl) {
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
{
fs_reg *reg;
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
void
fs_visitor::nir_emit_block(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
nir_emit_instr(instr);
}
}
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
nir_variable_mode mode)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
remap_vs_attrs(nir_block *block, GLbitfield64 inputs_read)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
remap_inputs_with_vue_map(nir_block *block, const struct brw_vue_map *vue_map)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
remap_patch_urb_offsets(nir_block *block, nir_builder *b,
const struct brw_vue_map *vue_map)
{
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
analyze_boolean_resolves_block(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
switch (instr->type) {
case nir_instr_type_alu: {
/* For ALU instructions, the resolve status is handled in a
{
nir_builder *b = &state->builder;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
{
bool progress = false;
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_alu)
continue;
static bool
setup_system_values_block(nir_block *block, vec4_visitor *v)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
void
vec4_visitor::nir_emit_block(nir_block *block)
{
- nir_foreach_instr(block, instr) {
+ nir_foreach_instr(instr, block) {
nir_emit_instr(instr);
}
}