nir_shader *
nir_shader_create(void *mem_ctx,
gl_shader_stage stage,
- const nir_shader_compiler_options *options)
+ const nir_shader_compiler_options *options,
+ shader_info *si)
{
- nir_shader *shader = ralloc(mem_ctx, nir_shader);
+ nir_shader *shader = rzalloc(mem_ctx, nir_shader);
exec_list_make_empty(&shader->uniforms);
exec_list_make_empty(&shader->inputs);
exec_list_make_empty(&shader->shared);
shader->options = options;
- memset(&shader->info, 0, sizeof(shader->info));
+
+ shader->info = si ? si : rzalloc(shader, shader_info);
exec_list_make_empty(&shader->functions);
exec_list_make_empty(&shader->registers);
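
With the extra parameter, a caller that already owns a shader_info (for
example one embedded in a gl_program) can share it with the nir_shader
instead of copying it, while passing NULL keeps the old behavior of a
freshly zeroed, shader-owned struct. A minimal usage sketch, not part of
the patch; `opts` and `info` are placeholders for structures the caller
already owns:

    #include "nir.h"

    static nir_shader *
    create_example(const nir_shader_compiler_options *opts, shader_info *info)
    {
       /* NULL: the shader rzallocs and owns a zeroed shader_info. */
       nir_shader *owned =
          nir_shader_create(NULL, MESA_SHADER_VERTEX, opts, NULL);

       /* Non-NULL: the shader points at the caller's info and does not
        * copy or own it. */
       nir_shader *shared =
          nir_shader_create(NULL, MESA_SHADER_VERTEX, opts, info);

       ralloc_free(owned);
       return shared;
    }
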
nir_block *
nir_block_create(nir_shader *shader)
{
- nir_block *block = ralloc(shader, nir_block);
+ nir_block *block = rzalloc(shader, nir_block);
cf_init(&block->cf_node, nir_cf_node_block);
_mesa_key_pointer_equal);
block->imm_dom = NULL;
/* XXX maybe it would be worth it to defer allocation? This
- * way it doesn't get allocated for shader ref's that never run
+ * way it doesn't get allocated for shader refs that never run
* nir_calc_dominance? For example, state-tracker creates an
* initial IR, clones that, runs appropriate lowering pass, passes
* to driver which does common lowering/opt, and then stores ref
nir_loop *
nir_loop_create(nir_shader *shader)
{
- nir_loop *loop = ralloc(shader, nir_loop);
+ nir_loop *loop = rzalloc(shader, nir_loop);
cf_init(&loop->cf_node, nir_cf_node_loop);
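
The ralloc-to-rzalloc swaps above and below matter because ralloc returns
uninitialized memory while rzalloc zero-fills it, so members such as
block->imm_dom start out NULL without an explicit memset. A sketch of the
distinction using Mesa's ralloc API; `struct node` is a made-up example
type:

    #include "util/ralloc.h"

    struct node {
       struct node *parent;
       unsigned flags;
    };

    static void
    alloc_example(void)
    {
       void *ctx = ralloc_context(NULL);
       struct node *a = ralloc(ctx, struct node);  /* fields are garbage */
       struct node *b = rzalloc(ctx, struct node); /* parent == NULL, flags == 0 */
       (void)a; (void)b;
       ralloc_free(ctx);
    }
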
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
unsigned num_srcs = nir_op_infos[op].num_inputs;
+ /* TODO: don't use rzalloc */
nir_alu_instr *instr =
- ralloc_size(shader,
- sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
+ rzalloc_size(shader,
+ sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));
instr_init(&instr->instr, nir_instr_type_alu);
instr->op = op;
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
+ /* TODO: don't use rzalloc */
nir_intrinsic_instr *instr =
- ralloc_size(shader,
+ rzalloc_size(shader,
sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));
instr_init(&instr->instr, nir_instr_type_intrinsic);
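
For the ALU and intrinsic constructors, zeroing also covers the
variable-length source array allocated past the end of the struct, which is
what the rzalloc_size call buys (the TODO notes the rzalloc could
eventually be avoided). A hedged sketch of that over-allocation pattern;
the header and payload types are invented for illustration:

    #include "util/ralloc.h"
    #include <stddef.h>

    struct header { unsigned op; };
    struct payload { void *ptr; };

    static struct header *
    create_with_trailing(void *ctx, unsigned n)
    {
       /* rzalloc_size zeroes the header and all n trailing payloads in
        * one go, so every payload.ptr starts out NULL. */
       return rzalloc_size(ctx,
                           sizeof(struct header) + n * sizeof(struct payload));
    }
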
return deref;
}
-static nir_deref_var *
-copy_deref_var(void *mem_ctx, nir_deref_var *deref)
+nir_deref_var *
+nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
{
+ if (deref == NULL)
+ return NULL;
+
nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_array *
-copy_deref_array(void *mem_ctx, nir_deref_array *deref)
+deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
{
nir_deref_array *ret = nir_deref_array_create(mem_ctx);
ret->base_offset = deref->base_offset;
}
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
static nir_deref_struct *
-copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
+deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
{
nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
ret->deref.type = deref->deref.type;
if (deref->deref.child)
- ret->deref.child = nir_copy_deref(ret, deref->deref.child);
+ ret->deref.child = nir_deref_clone(deref->deref.child, ret);
return ret;
}
nir_deref *
-nir_copy_deref(void *mem_ctx, nir_deref *deref)
+nir_deref_clone(const nir_deref *deref, void *mem_ctx)
{
if (deref == NULL)
return NULL;
switch (deref->deref_type) {
case nir_deref_type_var:
- return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
+ return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
case nir_deref_type_array:
- return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
+ return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
case nir_deref_type_struct:
- return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
+ return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
default:
unreachable("Invalid dereference type");
}
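
After the rename, deref cloning follows NIR's clone(thing, mem_ctx)
argument order, and nir_deref_var_clone is now NULL-safe, so a whole chain
can be cloned in one call. A usage sketch, assuming a caller that already
holds an instruction and a source deref:

    #include "nir.h"

    static void
    clone_example(nir_intrinsic_instr *instr, const nir_deref_var *src)
    {
       /* NULL-safe: if src is NULL, the clone is NULL as well, and the
        * child chain is cloned recursively with instr as its mem_ctx. */
       instr->variables[0] = nir_deref_var_clone(src, instr);
    }
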
assert(constant);
const nir_deref *tail = &deref->deref;
- unsigned matrix_offset = 0;
+ unsigned matrix_col = 0;
while (tail->child) {
switch (tail->child->deref_type) {
case nir_deref_type_array: {
assert(arr->deref_array_type == nir_deref_array_type_direct);
if (glsl_type_is_matrix(tail->type)) {
assert(arr->deref.child == NULL);
- matrix_offset = arr->base_offset;
+ matrix_col = arr->base_offset;
} else {
constant = constant->elements[arr->base_offset];
}
nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
bit_size);
- matrix_offset *= load->def.num_components;
- for (unsigned i = 0; i < load->def.num_components; i++) {
- switch (glsl_get_base_type(tail->type)) {
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT:
- load->value.u32[i] = constant->value.u[matrix_offset + i];
- break;
- case GLSL_TYPE_DOUBLE:
- load->value.f64[i] = constant->value.d[matrix_offset + i];
- break;
- case GLSL_TYPE_BOOL:
- load->value.u32[i] = constant->value.b[matrix_offset + i] ?
- NIR_TRUE : NIR_FALSE;
- break;
- default:
- unreachable("Invalid immediate type");
- }
+ switch (glsl_get_base_type(tail->type)) {
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL:
+ load->value = constant->values[matrix_col];
+ break;
+ default:
+ unreachable("Invalid immediate type");
}
return load;
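
The per-component loop can be dropped because the new indexing implies that
nir_constant now stores one nir_const_value per matrix column (or a single
entry for non-matrix types), so a whole column moves with one struct
assignment regardless of base type or bit size. A sketch of what the
replacement does, under that assumption:

    #include "nir.h"

    static void
    copy_column(nir_load_const_instr *load, const nir_constant *c,
                unsigned col)
    {
       /* One struct copy replaces the old switch over GLSL base types;
        * all components of the column are copied at once. */
       load->value = c->values[col];
    }
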
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
if (node->type == nir_cf_node_block)
- return nir_cf_node_cf_tree_first(nir_cf_node_next(node));
+ return nir_block_cf_tree_next(nir_cf_node_as_block(node));
else if (node->type == nir_cf_node_function)
return NULL;
else