New IR, or NIR, is an IR for Mesa intended to sit below GLSL IR and Mesa IR.
-Its design inherits from the various IR's that Mesa has used in the past, as
+Its design inherits from the various IRs that Mesa has used in the past, as
well as Direct3D assembly, and it includes a few new ideas as well. It is a
flat (in terms of using instructions instead of expressions), typeless IR,
similar to TGSI and Mesa IR. It also supports SSA (although it doesn't require
_mesa_key_pointer_equal);
block->imm_dom = NULL;
/* XXX maybe it would be worth it to defer allocation? This
- * way it doesn't get allocated for shader ref's that never run
+ * way it doesn't get allocated for shader refs that never run
* nir_calc_dominance? For example, state-tracker creates an
* initial IR, clones that, runs appropriate lowering pass, passes
* to driver which does common lowering/opt, and then stores ref
*/
bool is_packed;
- /** set of nir_src's where this register is used (read from) */
+ /** set of nir_srcs where this register is used (read from) */
struct list_head uses;
- /** set of nir_dest's where this register is defined (written to) */
+ /** set of nir_dests where this register is defined (written to) */
struct list_head defs;
- /** set of nir_if's where this register is used as a condition */
+ /** set of nir_ifs where this register is used as a condition */
struct list_head if_uses;
} nir_register;
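/* Illustrative only (not part of this change; assumes the
 * list_for_each_entry() helper from util/list.h and nir_src's use_link
 * member): walking every read of a register looks like
 *
 *    list_for_each_entry(nir_src, src, &reg->uses, use_link) {
 *       nir_instr *reader = src->parent_instr;
 *       ...
 *    }
 */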
nir_instr *parent_instr;
- /** set of nir_instr's where this register is used (read from) */
+ /** set of nir_instrs where this register is used (read from) */
struct list_head uses;
- /** set of nir_if's where this register is used as a condition */
+ /** set of nir_ifs where this register is used as a condition */
struct list_head if_uses;
uint8_t num_components;
typedef struct {
nir_instr instr;
- /* A list of nir_parallel_copy_entry's. The sources of all of the
+ /* A list of nir_parallel_copy_entrys. The sources of all of the
* entries are copied to the corresponding destinations "in parallel".
* In other words, if we have two entries: a -> b and b -> a, the values
* get swapped.
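*
* Illustrative aside (not part of this patch): sequentializing that swap
* requires a temporary, e.g.
*
*    tmp = mov a;
*    a   = mov b;
*    b   = mov tmp;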
unsigned num_dom_children;
struct nir_block **dom_children;
- /* Set of nir_block's on the dominance frontier of this block */
+ /* Set of nir_blocks on the dominance frontier of this block */
struct set *dom_frontier;
/*
}
/**
- * Similar to nir_ssa_for_src(), but for alu src's, respecting the
+ * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
* nir_alu_src's swizzle.
*/
static inline nir_ssa_def *
return nc;
}
-/* NOTE: for cloning nir_variable's, bypass nir_variable_create to avoid
+/* NOTE: for cloning nir_variables, bypass nir_variable_create to avoid
* having to deal with locals and globals separately:
*/
nir_variable *
}
}
-/* NOTE: for cloning nir_register's, bypass nir_global/local_reg_create()
+/* NOTE: for cloning nir_registers, bypass nir_global/local_reg_create()
* to avoid having to deal with locals and globals separately:
*/
static nir_register *
/* At first glance, it looks like we should clone the function_impl here.
* However, call instructions need to be able to reference at least the
- * function and those will get processed as we clone the function_impl's.
+ * function and those will get processed as we clone the function_impls.
* We stop here and do function_impls as a second pass.
*/
clone_function(&state, fxn, ns);
/* Only after all functions are cloned can we clone the actual function
- * implementations. This is because nir_call_instr's need to reference the
+ * implementations. This is because nir_call_instrs need to reference the
* functions of other functions and we don't know what order the functions
* will have in the list.
*/
}
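/* In outline (illustrative sketch; helper names are assumptions, not
 * necessarily the ones in this file):
 *
 *    foreach function fxn:  clone_function(&state, fxn, ns);   // pass 1: stubs
 *    foreach function fxn:  clone_function_impl(...);          // pass 2: bodies
 */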
/* Any phi nodes must stay part of the new block, or else their
- * sourcse will be messed up. This will reverse the order of the phi's, but
+ * sources will be messed up. This will reverse the order of the phis, but
* order shouldn't matter.
*/
nir_foreach_instr_safe(instr, block) {
/** NIR Control Flow Modification
*
- * This file contains various API's that make modifying control flow in NIR,
+ * This file contains various APIs that make modifying control flow in NIR,
* while maintaining the invariants checked by the validator, much easier.
* There are two parts to this:
*
- * 1. Inserting control flow (if's and loops) in various places, for creating
+ * 1. Inserting control flow (ifs and loops) in various places, for creating
* IR either from scratch or as part of some lowering pass.
* 2. Taking existing pieces of the IR and either moving them around or
* deleting them.
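*
* A sketch of #2 (illustrative; assuming the nir_cf_extract()/
* nir_cf_reinsert() helpers and the nir_cursor API):
*
*    nir_cf_list list;
*    nir_cf_extract(&list, nir_before_cf_node(first), nir_after_cf_node(last));
*    nir_cf_reinsert(&list, nir_after_cf_node(other));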
* predecessors:
*
* 1) After an if statement, if neither branch ends in a jump.
- * 2) After a loop, if there are multiple break's.
+ * 2) After a loop, if there are multiple breaks.
* 3) At the beginning of a loop.
*
* For #1, the phi node is considered to be part of the if, and for #2 and
* #3 the phi node is considered to be part of the loop. This allows us to
- * keep phi's intact, but it means that phi nodes cannot be separated from
+ * keep phis intact, but it means that phi nodes cannot be separated from
* the control flow they come from. For example, extracting an if without
* extracting all the phi nodes after it is not allowed, and neither is
* extracting only some of the phi nodes at the beginning of a block. It
* Each SSA definition is associated with a merge_node and the association
* is represented by a combination of a hash table and the "def" parameter
* in the merge_node structure. The merge_set stores a linked list of
- * merge_node's in dominence order of the ssa definitions. (Since the
+ * merge_nodes in dominance order of the ssa definitions. (Since the
* liveness analysis pass indexes the SSA values in dominance order for us,
* this is an easy thing to keep up.) It is assumed that no pair of the
* nodes in a given set interfere. Merging two sets or checking for
last_phi_instr = instr;
}
- /* If we don't have any phi's, then there's nothing for us to do. */
+ /* If we don't have any phis, then there's nothing for us to do. */
if (last_phi_instr == NULL)
return true;
nir_builder_instr_insert(b, &mov->instr);
}
-/* Resolves a single parallel copy operation into a sequence of mov's
+/* Resolves a single parallel copy operation into a sequence of movs
*
* This is based on Algorithm 1 from "Revisiting Out-of-SSA Translation for
* Correctness, Code Quality, and Efficiency" by Boissinot et al.
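*
* Roughly (an illustrative sketch of that algorithm, not the exact code):
*
*    while copies remain:
*       if some pending copy (b -> c) has a destination c that no other
*       pending copy still reads:
*          emit "c = mov b" and retire it
*       else:                      // only cycles remain
*          break one cycle by moving a source into a fresh temporary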
nir_instr_insert(nir_after_block_before_jump(block), &mov->instr);
}
-/** Lower all of the phi nodes in a block to imov's to and from a register
+/** Lower all of the phi nodes in a block to imovs to and from a register
*
* This provides a very quick-and-dirty out-of-SSA pass that you can run on a
- * single block to convert all of it's phis to a register and some imov's.
+ * single block to convert all of its phis to a register and some imovs.
* The code that is generated, while not optimal for actual codegen in a
* back-end, is easy to generate, correct, and will turn into the same set of
* phis after you call regs_to_ssa and do some copy propagation.
*/
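/* Illustrative result (not part of this patch): a phi such as
 *
 *    ssa_3 = phi block_1: ssa_1, block_2: ssa_2
 *
 * becomes a register r0, with "r0 = imov ssa_1" at the end of block_1,
 * "r0 = imov ssa_2" at the end of block_2, and "ssa_3 = imov r0" in place
 * of the phi.
 */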
assert(!var->in_control_flow && var->type != invariant);
- /* We are only interested in checking phi's for the basic induction
+ /* We are only interested in checking phis for the basic induction
* variable case as it's simple to detect. All basic induction variables
* have a phi node
*/
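/* For example (illustrative): in a counted loop "for (i = 0; i < n; i++)"
 * the basic induction variable i appears in SSA form as
 *
 *    i      = phi(0 from the preheader, i_next from the back-edge)
 *    i_next = iadd(i, 1)
 */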
get_loop_info(loop_info_state *state, nir_function_impl *impl)
{
/* Initialize all variables to "outside_loop". This also marks defs
- * invariant and constant if they are nir_instr_type_load_const's
+ * invariant and constant if they are nir_instr_type_load_consts
*/
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block)
/* Generates the lowering code for user-clip-planes, generating CLIPDIST
* from UCP[n] + CLIPVERTEX or POSITION. Additionally, an optional pass
- * for fragment shaders to insert conditional kill's based on the inter-
+ * for fragment shaders to insert conditional kills based on the inter-
* polated CLIPDIST
*
* NOTE: should be run after nir_lower_outputs_to_temporaries() (or at
* should be only a single predecessor block to end_block, which
* makes the perfect place to insert the clipdist calculations.
*
- * NOTE: in case of early return's, these would have to be lowered
+ * NOTE: in case of early returns, these would have to be lowered
* to jumps to the end_block predecessor in a previous pass. Not sure
* if there is a good way to sanity check this, but for now the
* users of this pass don't support sub-routines.
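*
* Concretely (illustrative): for each enabled plane n the pass computes
*
*    CLIPDIST[n] = dot(UCP[n], cv)
*
* where cv is CLIPVERTEX if the shader writes it and POSITION otherwise;
* the optional fragment-shader pass then does a conditional kill wherever
* the interpolated CLIPDIST[n] is negative.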
nir_metadata_preserve(impl, nir_metadata_dominance);
}
-/* ucp_enables is bitmask of enabled ucp's. Actual ucp values are
+/* ucp_enables is a bitmask of enabled ucps. Actual ucp values are
* passed in to the shader via user_clip_plane system-values
*/
void
nir_variable *var = dvar->var;
if (var->data.location == VARYING_SLOT_COL0) {
- /* gl_Color should not have array/struct deref's: */
+ /* gl_Color should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_color(state, intr);
} else if (var->data.location == VARYING_SLOT_TEX0) {
- /* gl_TexCoord should not have array/struct deref's: */
+ /* gl_TexCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_texcoord(state, intr);
}
nir_foreach_variable(var, var_list) {
/*
- * UBO's have their own address spaces, so don't count them towards the
+ * UBOs have their own address spaces, so don't count them towards the
* number of global uniforms
*/
if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
nvar->data = var->data;
nvar->data.location += off;
- /* nir_variable_create is too clever for it's own good: */
+ /* nir_variable_create is too clever for its own good: */
exec_node_remove(&nvar->node);
exec_node_self_link(&nvar->node); /* no delinit() :-( */
if (swizzle[0] < 4 && swizzle[1] < 4 &&
swizzle[2] < 4 && swizzle[3] < 4) {
unsigned swiz[4] = { swizzle[0], swizzle[1], swizzle[2], swizzle[3] };
- /* We have no 0's or 1's, just emit a swizzling MOV */
+ /* We have no 0s or 1s, just emit a swizzling MOV */
swizzled = nir_swizzle(b, &tex->dest.ssa, swiz, 4, false);
} else {
nir_ssa_def *srcs[4];
if (var->data.mode == nir_var_shader_in &&
var->data.location == VARYING_SLOT_POS) {
- /* gl_FragCoord should not have array/struct deref's: */
+ /* gl_FragCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
add_half_to_fragcoord(b, intr);
progress = true;
if (var->data.mode == nir_var_shader_in &&
var->data.location == VARYING_SLOT_POS) {
- /* gl_FragCoord should not have array/struct deref's: */
+ /* gl_FragCoord should not have array/struct derefs: */
assert(dvar->deref.child == NULL);
lower_fragcoord(state, intr);
} else if (var->data.mode == nir_var_system_value &&
* ssa_2 = fadd(ssa_1.x, ssa_1.y)
*
* While this is "worse" because it adds a bunch of unneeded dependencies, it
- * actually makes it much easier for vec4-based backends to coalesce the MOV's
+ * actually makes it much easier for vec4-based backends to coalesce the MOVs
* that result from the vec4 operation because it doesn't have to worry about
* quite as many reads.
*/
*/
/*
- * Visits and CSE's the given block and all its descendants in the dominance
+ * Visits and CSEs the given block and all its descendants in the dominance
* tree recursively. Note that the instr_set is guaranteed to only ever
* contain instructions that dominate the current block.
*/
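/* Shape of that walk (illustrative pseudocode, not the exact code below):
 *
 *    visit(block):
 *       foreach instr: rewrite it from a matching entry in instr_set,
 *                      otherwise add instr to instr_set
 *       foreach child in block->dom_children: visit(child)
 *       remove this block's instructions from instr_set
 */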
if (!list_empty(&mov->dest.dest.ssa.if_uses))
return false;
- /* The only uses of this definition must be phi's in the successor */
+ /* The only uses of this definition must be phis in the successor */
nir_foreach_use(use, &mov->dest.dest.ssa) {
if (use->parent_instr->type != nir_instr_type_phi ||
use->parent_instr->block != block->successors[0])
assert(def != NULL);
if (mov) {
- /* If the sources were all mov's from the same source with the same
+ /* If the sources were all movs from the same source with the same
* swizzle, then we can't just pick a random move because it may not
* dominate the phi node. Instead, we need to emit our own move after
* the phi which uses the shared source, and rewrite uses of the phi
- * to use the move instead. This is ok, because while the mov's may
+ * to use the move instead. This is ok, because while the movs may
* not all dominate the phi node, their shared source does.
*/
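/* Illustrative example (not part of this patch): if every predecessor
 * feeds the phi with "mov ssa_0.x", we emit
 *
 *    ssa_M = mov ssa_0.x
 *
 * immediately after the phi and rewrite the phi's uses to ssa_M; ssa_0
 * dominates the phi even though the individual movs need not.
 */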
{
nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
- /* only constant src's: */
+ /* only constant srcs: */
if (!val)
return false;
{
nir_const_value *val = nir_src_as_const_value(instr->src[src].src);
- /* only constant src's: */
+ /* only constant srcs: */
if (!val)
return false;