nir_shader *
nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
- const nir_shader_compiler_options *options)
+ const nir_shader_compiler_options *options,
+ shader_info *si)
{
- nir_shader *shader = ralloc(mem_ctx, nir_shader);
+ nir_shader *shader = rzalloc(mem_ctx, nir_shader);
exec_list_make_empty(&shader->uniforms);
exec_list_make_empty(&shader->inputs);
exec_list_make_empty(&shader->shared);
shader->options = options;
- memset(&shader->info, 0, sizeof(shader->info));
+
+ shader->info = si ? si : rzalloc(shader, shader_info);
exec_list_make_empty(&shader->functions);
exec_list_make_empty(&shader->registers);
if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
(mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
- var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
+ var->data.interpolation = INTERP_MODE_SMOOTH;
if (mode == nir_var_shader_in || mode == nir_var_uniform)
var->data.read_only = true;
nir_block *
nir_block_create(nir_shader *shader)
{
- nir_block *block = ralloc(shader, nir_block);
+ nir_block *block = rzalloc(shader, nir_block);
cf_init(&block->cf_node, nir_cf_node_block);
_mesa_key_pointer_equal);
block->imm_dom = NULL;
/* XXX maybe it would be worth it to defer allocation? This
- * way it doesn't get allocated for shader ref's that never run
+ * way it doesn't get allocated for shader refs that never run
* nir_calc_dominance? For example, state-tracker creates an
* initial IR, clones that, runs appropriate lowering pass, passes
* to driver which does common lowering/opt, and then stores ref
nir_loop *
nir_loop_create(nir_shader *shader)
{
- nir_loop *loop = ralloc(shader, nir_loop);
+ nir_loop *loop = rzalloc(shader, nir_loop);
cf_init(&loop->cf_node, nir_cf_node_loop);
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
unsigned num_srcs = nir_op_infos[op].num_inputs;
+ /* TODO: don't use rzalloc */
nir_alu_instr *instr =
- ralloc_size(shader,
- sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
+ rzalloc_size(shader,
+ sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
instr_init(&instr->instr, nir_instr_type_alu);
instr->op = op;
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
+ /* TODO: don't use rzalloc */
nir_intrinsic_instr *instr =
- ralloc_size(shader,
+ rzalloc_size(shader,
sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));
instr_init(&instr->instr, nir_instr_type_intrinsic);
return instr;
}
+void
+nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
+{
+ assert(src_idx < tex->num_srcs);
+
+ /* First rewrite the source to NIR_SRC_INIT */
+ nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);
+
+ /* Now, move all of the other sources down */
+ for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
+ tex->src[i-1].src_type = tex->src[i].src_type;
+ nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
+ }
+ tex->num_srcs--;
+}
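
/* A hedged usage sketch, not part of this change: a lowering pass that has
 * already folded a texture projector into the coordinate could drop the
 * projector operand with the new helper. nir_tex_instr_src_index() is an
 * existing helper; the pass structure around this function is assumed.
 */
static void
drop_projector(nir_tex_instr *tex)
{
   int proj_idx = nir_tex_instr_src_index(tex, nir_tex_src_projector);
   if (proj_idx >= 0)
      nir_tex_instr_remove_src(tex, proj_idx);
}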
+
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
return deref;
}
-static nir_deref_var *
-copy_deref_var(void *mem_ctx, nir_deref_var *deref)
+nir_deref_var *
+nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
{
+ if (deref == NULL)
+ return NULL;
+
nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_array *
-copy_deref_array(void *mem_ctx, nir_deref_array *deref)
+deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
{
nir_deref_array *ret = nir_deref_array_create(mem_ctx);
ret->base_offset = deref->base_offset;
}
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_struct *
-copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
+deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
{
nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
nir_deref *
-nir_copy_deref(void *mem_ctx, nir_deref *deref)
+nir_deref_clone(const nir_deref *deref, void *mem_ctx)
{
+ if (deref == NULL)
+ return NULL;
+
switch (deref->deref_type) {
case nir_deref_type_var:
- return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
+ return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
case nir_deref_type_array:
- return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
+ return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
case nir_deref_type_struct:
- return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
+ return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
default:
unreachable("Invalid dereference type");
}
return NULL;
}
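
/* A minimal sketch of the renamed clone entry point in use (the intrinsic
 * and its variable slot are assumed context from a copy-propagation-style
 * pass): the clone is ralloc'd against the new instruction, so it is freed
 * together with it.
 */
static void
clone_deref_onto(nir_intrinsic_instr *instr, const nir_deref_var *src)
{
   instr->variables[0] = nir_deref_var_clone(src, instr);
}
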
+/* This is the second step in the recursion. We've found the tail and made a
+ * copy. Now we need to iterate over all possible leaves and call the
+ * callback on each one.
+ */
+static bool
+deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
+ nir_deref_foreach_leaf_cb cb, void *state)
+{
+ unsigned length;
+ union {
+ nir_deref_array arr;
+ nir_deref_struct str;
+ } tmp;
+
+ assert(tail->child == NULL);
+ switch (glsl_get_base_type(tail->type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ if (glsl_type_is_vector_or_scalar(tail->type))
+ return cb(deref, state);
+ /* Fall Through */
+
+ case GLSL_TYPE_ARRAY:
+ tmp.arr.deref.deref_type = nir_deref_type_array;
+ tmp.arr.deref.type = glsl_get_array_element(tail->type);
+ tmp.arr.deref_array_type = nir_deref_array_type_direct;
+ tmp.arr.indirect = NIR_SRC_INIT;
+ tail->child = &tmp.arr.deref;
+
+ length = glsl_get_length(tail->type);
+ for (unsigned i = 0; i < length; i++) {
+ tmp.arr.deref.child = NULL;
+ tmp.arr.base_offset = i;
+ if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
+ return false;
+ }
+ return true;
+
+ case GLSL_TYPE_STRUCT:
+ tmp.str.deref.deref_type = nir_deref_type_struct;
+ tail->child = &tmp.str.deref;
+
+ length = glsl_get_length(tail->type);
+ for (unsigned i = 0; i < length; i++) {
+ tmp.str.deref.child = NULL;
+ tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
+ tmp.str.index = i;
+ if (!deref_foreach_leaf_build_recur(deref, &tmp.str.deref, cb, state))
+ return false;
+ }
+ return true;
+
+ default:
+ unreachable("Invalid type for dereference");
+ }
+}
+
+/* This is the first step of the foreach_leaf recursion. In this step we are
+ * walking to the end of the deref chain and making a copy on the stack as
+ * we go. This is because we don't want to mutate the deref chain that was
+ * passed in by the caller. The downside is that the copied deref chain
+ * lives on the stack; if the caller wants to do anything with it, they
+ * will have to make their own copy, because this one will go away.
+ */
+static bool
+deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
+ nir_deref_foreach_leaf_cb cb, void *state)
+{
+ union {
+ nir_deref_array arr;
+ nir_deref_struct str;
+ } c;
+
+ if (tail->child) {
+ switch (tail->child->deref_type) {
+ case nir_deref_type_array:
+ c.arr = *nir_deref_as_array(tail->child);
+ tail->child = &c.arr.deref;
+ return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);
+
+ case nir_deref_type_struct:
+ c.str = *nir_deref_as_struct(tail->child);
+ tail->child = &c.str.deref;
+ return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);
+
+ case nir_deref_type_var:
+ default:
+ unreachable("Invalid deref type for a child");
+ }
+ } else {
+ /* We've gotten to the end of the original deref. Time to start
+ * building our own derefs.
+ */
+ return deref_foreach_leaf_build_recur(deref, tail, cb, state);
+ }
+}
+
+/**
+ * This function iterates over all of the possible derefs that can be created
+ * with the given deref as the head. It then calls the provided callback with
+ * a full deref for each one.
+ *
+ * The deref passed to the callback will be allocated on the stack. You will
+ * need to make a copy if you want it to hang around.
+ */
+bool
+nir_deref_foreach_leaf(nir_deref_var *deref,
+ nir_deref_foreach_leaf_cb cb, void *state)
+{
+ nir_deref_var copy = *deref;
+ return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
+}
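
/* A hedged example of the new iterator: counting the vector/scalar leaves
 * under a variable. Because the chain handed to the callback lives on the
 * stack, a pass that wanted to keep one would nir_deref_var_clone() it.
 */
static bool
count_leaf_cb(nir_deref_var *deref, void *state)
{
   (void) deref;
   (*(unsigned *)state)++;
   return true; /* keep walking */
}

static unsigned
count_leaves(nir_deref_var *deref)
{
   unsigned count = 0;
   nir_deref_foreach_leaf(deref, count_leaf_cb, &count);
   return count;
}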
+
/* Returns a load_const instruction that represents the constant
* initializer for the given deref chain. The caller is responsible for
* ensuring that there actually is a constant initializer.
assert(constant);
const nir_deref *tail = &deref->deref;
- unsigned matrix_offset = 0;
+ unsigned matrix_col = 0;
while (tail->child) {
switch (tail->child->deref_type) {
case nir_deref_type_array: {
assert(arr->deref_array_type == nir_deref_array_type_direct);
if (glsl_type_is_matrix(tail->type)) {
assert(arr->deref.child == NULL);
- matrix_offset = arr->base_offset;
+ matrix_col = arr->base_offset;
} else {
constant = constant->elements[arr->base_offset];
}
tail = tail->child;
}
- unsigned bit_size = glsl_get_bit_size(glsl_get_base_type(tail->type));
+ unsigned bit_size = glsl_get_bit_size(tail->type);
nir_load_const_instr *load =
nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
bit_size);
- matrix_offset *= load->def.num_components;
- for (unsigned i = 0; i < load->def.num_components; i++) {
- switch (glsl_get_base_type(tail->type)) {
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- load->value.u32[i] = constant->value.u[matrix_offset + i];
- break;
- case GLSL_TYPE_DOUBLE:
- load->value.f64[i] = constant->value.d[matrix_offset + i];
- break;
- case GLSL_TYPE_BOOL:
- load->value.u32[i] = constant->value.b[matrix_offset + i] ?
- NIR_TRUE : NIR_FALSE;
- break;
- default:
- unreachable("Invalid immediate type");
- }
+ switch (glsl_get_base_type(tail->type)) {
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL:
+ load->value = constant->values[matrix_col];
+ break;
+ default:
+ unreachable("Invalid immediate type");
}
return load;
static bool
remove_use_cb(nir_src *src, void *state)
{
+ (void) state;
+
if (src_is_valid(src))
list_del(&src->use_link);
static bool
remove_def_cb(nir_dest *dest, void *state)
{
+ (void) state;
+
if (!dest->is_ssa)
list_del(&dest->reg.def_link);
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
nir_foreach_dest_cb cb, void *state)
{
- nir_foreach_parallel_copy_entry(instr, entry) {
+ nir_foreach_parallel_copy_entry(entry, instr) {
if (!cb(&entry->dest, state))
return false;
}
return true;
}
-static bool
-visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
-{
- return true;
-}
-
-static bool
-visit_load_const_src(nir_load_const_instr *instr, nir_foreach_src_cb cb,
- void *state)
-{
- return true;
-}
-
static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
nir_foreach_src_cb cb, void *state)
{
- nir_foreach_parallel_copy_entry(instr, entry) {
+ nir_foreach_parallel_copy_entry(entry, instr) {
if (!visit_src(&entry->src, cb, state))
return false;
}
return false;
break;
case nir_instr_type_call:
- if (!visit_call_src(nir_instr_as_call(instr), cb, state))
- return false;
+ /* Call instructions have no regular sources */
break;
case nir_instr_type_load_const:
- if (!visit_load_const_src(nir_instr_as_load_const(instr), cb, state))
- return false;
+ /* Constant load instructions have no regular sources */
break;
case nir_instr_type_phi:
if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
{
assert(!new_src.is_ssa || def != new_src.ssa);
- nir_foreach_use_safe(def, use_src)
+ nir_foreach_use_safe(use_src, def)
nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
- nir_foreach_if_use_safe(def, use_src)
+ nir_foreach_if_use_safe(use_src, def)
nir_if_rewrite_condition(use_src->parent_if, new_src);
}
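
/* A small usage sketch (the instructions are assumed to come from a
 * surrounding constant-folding pass): rewrite every use of an ALU result
 * to point at a load_const, then drop the dead ALU instruction.
 */
static void
replace_alu_with_const(nir_alu_instr *alu, nir_load_const_instr *load)
{
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(&load->def));
   nir_instr_remove(&alu->instr);
}
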
{
assert(!new_src.is_ssa || def != new_src.ssa);
- nir_foreach_use_safe(def, use_src) {
+ nir_foreach_use_safe(use_src, def) {
assert(use_src->parent_instr != def->parent_instr);
/* Since def already dominates all of its uses, the only way a use can
* not be dominated by after_me is if it is between def and after_me in
nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
}
- nir_foreach_if_use_safe(def, use_src)
+ nir_foreach_if_use_safe(use_src, def)
nir_if_rewrite_condition(use_src->parent_if, new_src);
}
nir_ssa_def_components_read(nir_ssa_def *def)
{
uint8_t read_mask = 0;
- nir_foreach_use(def, use) {
+ nir_foreach_use(use, def) {
if (use->parent_instr->type == nir_instr_type_alu) {
nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
case nir_cf_node_if: {
/* Are we at the end of the if? Go to the beginning of the else */
nir_if *if_stmt = nir_cf_node_as_if(parent);
- if (&block->cf_node == nir_if_last_then_node(if_stmt))
- return nir_cf_node_as_block(nir_if_first_else_node(if_stmt));
+ if (block == nir_if_last_then_block(if_stmt))
+ return nir_if_first_else_block(if_stmt);
- assert(&block->cf_node == nir_if_last_else_node(if_stmt));
+ assert(block == nir_if_last_else_block(if_stmt));
/* fall through */
}
case nir_cf_node_if: {
/* Are we at the beginning of the else? Go to the end of the if */
nir_if *if_stmt = nir_cf_node_as_if(parent);
- if (&block->cf_node == nir_if_first_else_node(if_stmt))
- return nir_cf_node_as_block(nir_if_last_then_node(if_stmt));
+ if (block == nir_if_first_else_block(if_stmt))
+ return nir_if_last_then_block(if_stmt);
- assert(&block->cf_node == nir_if_first_then_node(if_stmt));
+ assert(block == nir_if_first_then_block(if_stmt));
/* fall through */
}
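
/* With the block/node helpers cleaned up, walking every block of an impl in
 * source order is a plain loop; this sketch assumes a valid impl and shows
 * how a nir_foreach_block-style traversal can be written by hand.
 */
static void
visit_all_blocks(nir_function_impl *impl)
{
   for (nir_block *block = nir_start_block(impl); block != NULL;
        block = nir_block_cf_tree_next(block))
      ; /* visit block here */
}
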
case nir_cf_node_if: {
nir_if *if_stmt = nir_cf_node_as_if(node);
- return nir_cf_node_as_block(nir_if_first_then_node(if_stmt));
+ return nir_if_first_then_block(if_stmt);
}
case nir_cf_node_loop: {
nir_loop *loop = nir_cf_node_as_loop(node);
- return nir_cf_node_as_block(nir_loop_first_cf_node(loop));
+ return nir_loop_first_block(loop);
}
case nir_cf_node_block: {
case nir_cf_node_if: {
nir_if *if_stmt = nir_cf_node_as_if(node);
- return nir_cf_node_as_block(nir_if_last_else_node(if_stmt));
+ return nir_if_last_else_block(if_stmt);
}
case nir_cf_node_loop: {
nir_loop *loop = nir_cf_node_as_loop(node);
- return nir_cf_node_as_block(nir_loop_last_cf_node(loop));
+ return nir_loop_last_block(loop);
}
case nir_cf_node_block: {
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
if (node->type == nir_cf_node_block)
- return nir_cf_node_cf_tree_first(nir_cf_node_next(node));
+ return nir_block_cf_tree_next(nir_cf_node_as_block(node));
else if (node->type == nir_cf_node_function)
return NULL;
else
return nir_intrinsic_load_sample_mask_in;
case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
return nir_intrinsic_load_local_invocation_id;
+ case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
+ return nir_intrinsic_load_local_invocation_index;
case SYSTEM_VALUE_WORK_GROUP_ID:
return nir_intrinsic_load_work_group_id;
case SYSTEM_VALUE_NUM_WORK_GROUPS:
return SYSTEM_VALUE_SAMPLE_MASK_IN;
case nir_intrinsic_load_local_invocation_id:
return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
+ case nir_intrinsic_load_local_invocation_index:
+ return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
case nir_intrinsic_load_num_work_groups:
return SYSTEM_VALUE_NUM_WORK_GROUPS;
case nir_intrinsic_load_work_group_id: