nir_alu_instr *alu_instr;
nir_intrinsic_instr *intrinsic_instr;
nir_tex_instr *tex_instr;
- nir_load_const_instr *load_const_instr;
switch (instr->type) {
case nir_instr_type_tex:
tex_instr = nir_instr_as_tex(instr);
return &tex_instr->dest;
- case nir_instr_type_load_const:
- load_const_instr = nir_instr_as_load_const(instr);
- return &load_const_instr->dest;
-
default:
assert(0);
break;
*/
if (ir->type->base_type == GLSL_TYPE_BOOL) {
- nir_load_const_instr *const_zero = nir_load_const_instr_create(shader);
- const_zero->num_components = 1;
+ nir_load_const_instr *const_zero = nir_load_const_instr_create(shader, 1);
const_zero->value.u[0] = 0;
- const_zero->dest.is_ssa = true;
- nir_ssa_def_init(&const_zero->instr, &const_zero->dest.ssa, 1, NULL);
nir_instr_insert_after_cf_list(this->cf_node_list, &const_zero->instr);
nir_alu_instr *compare = nir_alu_instr_create(shader, nir_op_ine);
compare->src[0].src.is_ssa = true;
compare->src[0].src.ssa = &load->dest.ssa;
compare->src[1].src.is_ssa = true;
- compare->src[1].src.ssa = &const_zero->dest.ssa;
+ compare->src[1].src.ssa = &const_zero->def;
for (unsigned i = 0; i < ir->type->vector_elements; i++)
compare->src[1].swizzle[i] = 0;
compare->dest.write_mask = (1 << ir->type->vector_elements) - 1;
}
nir_load_const_instr *
-nir_load_const_instr_create(void *mem_ctx)
+nir_load_const_instr_create(void *mem_ctx, unsigned num_components)
{
nir_load_const_instr *instr = ralloc(mem_ctx, nir_load_const_instr);
instr_init(&instr->instr, nir_instr_type_load_const);
- dest_init(&instr->dest);
- instr->num_components = 0;
- instr->array_elems = 0;
+ nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);
return instr;
}
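As a usage sketch (the helper name and arguments below are illustrative, not part of the patch): callers now pass the component count to nir_load_const_instr_create(), fill in value, and take the result from the embedded def.

static nir_ssa_def *
build_uint_constant(void *mem_ctx, nir_instr *before, unsigned u)
{
   nir_load_const_instr *load = nir_load_const_instr_create(mem_ctx, 1);
   load->value.u[0] = u;
   nir_instr_insert_before(before, &load->instr);
   return &load->def;
}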
return cb(&instr->dest, state);
}
-static bool
-visit_load_const_dest(nir_load_const_instr *instr, nir_foreach_dest_cb cb,
- void *state)
-{
- return cb(&instr->dest, state);
-}
-
static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
case nir_instr_type_tex:
return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
- case nir_instr_type_load_const:
- return visit_load_const_dest(nir_instr_as_load_const(instr), cb, state);
case nir_instr_type_phi:
return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
case nir_instr_type_parallel_copy:
return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
cb, state);
+ case nir_instr_type_load_const:
case nir_instr_type_ssa_undef:
case nir_instr_type_call:
case nir_instr_type_jump:
case nir_instr_type_alu:
case nir_instr_type_tex:
case nir_instr_type_intrinsic:
- case nir_instr_type_load_const:
case nir_instr_type_phi:
case nir_instr_type_parallel_copy: {
struct foreach_ssa_def_state foreach_state = {cb, state};
return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
}
+ case nir_instr_type_load_const:
+ return cb(&nir_instr_as_load_const(instr)->def, state);
case nir_instr_type_ssa_undef:
return cb(&nir_instr_as_ssa_undef(instr)->def, state);
case nir_instr_type_call:
nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);
- if (load->array_elems == 0)
- return &load->value;
- if (load->array_elems == 1)
- return load->array;
- else
- return NULL;
+ return &load->value;
}
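With array constants gone, nir_src_as_const_value() has only the vector case to return. A hypothetical caller (a sketch, assuming the usual NULL return for non-constant sources; src_is_const_zero is not part of the patch) can rely on getting the value directly:

static bool
src_is_const_zero(nir_alu_instr *alu, unsigned i)
{
   nir_const_value *val = nir_src_as_const_value(alu->src[i].src);
   return val != NULL && val->u[0] == 0;
}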
bool
typedef struct {
nir_instr instr;
- union {
- nir_const_value value;
- nir_const_value *array;
- };
-
- unsigned num_components;
-
- /**
- * The number of constant array elements to be copied into the variable. If
- * this != 0, then value.array holds the array of size array_elems;
- * otherwise, value.value holds the single vector constant (the more common
- * case, and the only case for SSA destinations).
- */
- unsigned array_elems;
+ nir_const_value value;
- nir_dest dest;
+ nir_ssa_def def;
} nir_load_const_instr;
typedef enum {
nir_jump_instr *nir_jump_instr_create(void *mem_ctx, nir_jump_type type);
-nir_load_const_instr *nir_load_const_instr_create(void *mem_ctx);
+nir_load_const_instr *nir_load_const_instr_create(void *mem_ctx,
+ unsigned num_components);
nir_intrinsic_instr *nir_intrinsic_instr_create(void *mem_ctx,
nir_intrinsic_op op);
if (!copy->src.is_ssa)
continue;
+ /* Since load_const instructions are SSA only, we can't replace their
+ * destinations with registers and, therefore, can't coalesce them.
+ */
+ if (copy->src.ssa->parent_instr->type == nir_instr_type_load_const)
+ continue;
+
/* Don't try and coalesce these */
if (copy->dest.ssa.num_components != copy->src.ssa->num_components)
continue;
new_instr->const_index[0] =
(int) instr->variables[0]->var->data.atomic.buffer_index;
- nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx);
- offset_const->num_components = 1;
+ nir_load_const_instr *offset_const = nir_load_const_instr_create(mem_ctx, 1);
offset_const->value.u[0] = instr->variables[0]->var->data.atomic.offset;
- offset_const->dest.is_ssa = true;
- nir_ssa_def_init(&offset_const->instr, &offset_const->dest.ssa, 1, NULL);
nir_instr_insert_before(&instr->instr, &offset_const->instr);
- nir_ssa_def *offset_def = &offset_const->dest.ssa;
+ nir_ssa_def *offset_def = &offset_const->def;
if (instr->variables[0]->deref.child != NULL) {
assert(instr->variables[0]->deref.child->deref_type ==
if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
nir_load_const_instr *atomic_counter_size =
- nir_load_const_instr_create(mem_ctx);
- atomic_counter_size->num_components = 1;
+ nir_load_const_instr_create(mem_ctx, 1);
atomic_counter_size->value.u[0] = ATOMIC_COUNTER_SIZE;
- atomic_counter_size->dest.is_ssa = true;
- nir_ssa_def_init(&atomic_counter_size->instr,
- &atomic_counter_size->dest.ssa, 1, NULL);
nir_instr_insert_before(&instr->instr, &atomic_counter_size->instr);
nir_alu_instr *mul = nir_alu_instr_create(mem_ctx, nir_op_imul);
mul->dest.write_mask = 0x1;
mul->src[0].src = nir_src_copy(deref_array->indirect, mem_ctx);
mul->src[1].src.is_ssa = true;
- mul->src[1].src.ssa = &atomic_counter_size->dest.ssa;
+ mul->src[1].src.ssa = &atomic_counter_size->def;
nir_instr_insert_before(&instr->instr, &mul->instr);
nir_alu_instr *add = nir_alu_instr_create(mem_ctx, nir_op_iadd);
add->src[0].src.is_ssa = true;
add->src[0].src.ssa = &mul->dest.dest.ssa;
add->src[1].src.is_ssa = true;
- add->src[1].src.ssa = &offset_const->dest.ssa;
+ add->src[1].src.ssa = &offset_const->def;
nir_instr_insert_before(&instr->instr, &add->instr);
offset_def = &add->dest.dest.ssa;
if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
nir_load_const_instr *load_const =
- nir_load_const_instr_create(state->mem_ctx);
- load_const->num_components = 1;
+ nir_load_const_instr_create(state->mem_ctx, 1);
load_const->value.u[0] = size;
- load_const->dest.is_ssa = true;
- nir_ssa_def_init(&load_const->instr, &load_const->dest.ssa,
- 1, NULL);
nir_instr_insert_before(instr, &load_const->instr);
nir_alu_instr *mul = nir_alu_instr_create(state->mem_ctx,
nir_op_imul);
mul->src[0].src.is_ssa = true;
- mul->src[0].src.ssa = &load_const->dest.ssa;
+ mul->src[0].src.ssa = &load_const->def;
mul->src[1].src = nir_src_copy(deref_array->indirect,
state->mem_ctx);
mul->dest.write_mask = 1;
if (src.reg.indirect) {
nir_load_const_instr *load_const =
- nir_load_const_instr_create(state->mem_ctx);
- load_const->num_components = 1;
+ nir_load_const_instr_create(state->mem_ctx, 1);
load_const->value.u[0] = glsl_get_length(parent_type);
- load_const->dest.is_ssa = true;
- nir_ssa_def_init(&load_const->instr, &load_const->dest.ssa, 1, NULL);
nir_instr_insert_before(instr, &load_const->instr);
nir_alu_instr *mul = nir_alu_instr_create(state->mem_ctx, nir_op_imul);
mul->src[0].src = *src.reg.indirect;
mul->src[1].src.is_ssa = true;
- mul->src[1].src.ssa = &load_const->dest.ssa;
+ mul->src[1].src.ssa = &load_const->def;
mul->dest.write_mask = 1;
mul->dest.dest.is_ssa = true;
nir_ssa_def_init(&mul->instr, &mul->dest.dest.ssa, 1, NULL);
tail = tail->child;
}
- nir_load_const_instr *load = nir_load_const_instr_create(state->mem_ctx);
- load->array_elems = 0;
- load->num_components = glsl_get_vector_elements(tail->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(state->mem_ctx,
+ glsl_get_vector_elements(tail->type));
- matrix_offset *= load->num_components;
- for (unsigned i = 0; i < load->num_components; i++) {
+ matrix_offset *= load->def.num_components;
+ for (unsigned i = 0; i < load->def.num_components; i++) {
switch (glsl_get_base_type(tail->type)) {
case GLSL_TYPE_FLOAT:
case GLSL_TYPE_INT:
if (deref->var->constant_initializer) {
nir_load_const_instr *load = get_const_initializer_load(deref, &state);
- load->dest.is_ssa = true;
- nir_ssa_def_init(&load->instr, &load->dest.ssa,
+ nir_ssa_def_init(&load->instr, &load->def,
glsl_get_vector_elements(node->type), NULL);
nir_instr_insert_before_cf_list(&impl->body, &load->instr);
- def_stack_push(node, &load->dest.ssa, &state);
+ def_stack_push(node, &load->def, &state);
}
if (deref->var->data.mode == nir_var_shader_out)
/* We shouldn't have any saturate modifiers in the optimization loop. */
assert(!instr->dest.saturate);
- dest = nir_load_const_instr_create(mem_ctx);
- dest->array_elems = 0;
- dest->num_components = instr->dest.dest.ssa.num_components;
+ dest = nir_load_const_instr_create(mem_ctx,
+ instr->dest.dest.ssa.num_components);
switch (instr->op) {
case nir_op_ineg:
return false;
}
- dest->dest.is_ssa = true;
- nir_ssa_def_init(&dest->instr, &dest->dest.ssa,
- instr->dest.dest.ssa.num_components,
- instr->dest.dest.ssa.name);
-
nir_instr_insert_before(&instr->instr, &dest->instr);
nir_src new_src = {
.is_ssa = true,
- .ssa = &dest->dest.ssa,
+ .ssa = &dest->def,
};
nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa, new_src, mem_ctx);
nir_load_const_instr *load1 = nir_instr_as_load_const(instr1);
nir_load_const_instr *load2 = nir_instr_as_load_const(instr2);
- if (load1->num_components != load2->num_components)
+ if (load1->def.num_components != load2->def.num_components)
return false;
return memcmp(load1->value.f, load2->value.f,
- load1->num_components * sizeof load2->value.f) == 0;
+ load1->def.num_components * sizeof load2->value.f[0]) == 0;
}
case nir_instr_type_phi: {
nir_phi_instr *phi1 = nir_instr_as_phi(instr1);
assert(nir_instr_as_alu(instr)->dest.dest.is_ssa);
return &nir_instr_as_alu(instr)->dest.dest.ssa;
case nir_instr_type_load_const:
- assert(nir_instr_as_load_const(instr)->dest.is_ssa);
- return &nir_instr_as_load_const(instr)->dest.ssa;
+ return &nir_instr_as_load_const(instr)->def;
case nir_instr_type_phi:
assert(nir_instr_as_phi(instr)->dest.is_ssa);
return &nir_instr_as_phi(instr)->dest.ssa;
nir_alu_instr *alu_instr;
nir_intrinsic_instr *intrin_instr;
nir_tex_instr *tex_instr;
- nir_load_const_instr *load_const_instr;
instr->live = false;
worklist_push(worklist, instr);
break;
- case nir_instr_type_load_const:
- load_const_instr = nir_instr_as_load_const(instr);
- if (!load_const_instr->dest.is_ssa)
- worklist_push(worklist, instr);
- break;
-
default:
break;
}
}
static void
-print_const_value(nir_const_value value, unsigned num_components, FILE *fp)
+print_load_const_instr(nir_load_const_instr *instr, unsigned tabs, FILE *fp)
{
- fprintf(fp, "(");
+ print_ssa_def(&instr->def, fp);
+
+ fprintf(fp, " = load_const (");
bool first = true;
- for (unsigned i = 0; i < num_components; i++) {
+ for (unsigned i = 0; i < instr->def.num_components; i++) {
if (!first)
fprintf(fp, ", ");
* and then print the float in a comment for readability.
*/
- fprintf(fp, "0x%08x /* %f */", value.u[i], value.f[i]);
+ fprintf(fp, "0x%08x /* %f */", instr->value.u[i], instr->value.f[i]);
first = false;
}
-
fprintf(fp, ")");
}
-
-static void
-print_load_const_instr(nir_load_const_instr *instr, unsigned tabs, FILE *fp)
-{
- print_dest(&instr->dest, fp);
-
- fprintf(fp, " = load_const ");
-
- if (instr->array_elems == 0) {
- print_const_value(instr->value, instr->num_components, fp);
- } else {
- fprintf(fp, "{\n");
- for (unsigned i = 0; i < instr->array_elems; i++) {
- print_tabs(tabs + 1, fp);
- print_const_value(instr->array[i], instr->num_components, fp);
- fprintf(fp, ", \n");
- }
- fprintf(fp, "}");
- }
-}
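For reference, a single-component constant now prints on one line in the form below (assuming print_ssa_def emits the usual ssa_N spelling); the hex word and the float comment are the same value shown both ways:

   ssa_3 = load_const (0x3f800000 /* 1.000000 */)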
static void
case nir_search_value_constant: {
const nir_search_constant *c = nir_search_value_as_constant(value);
- nir_load_const_instr *load = nir_load_const_instr_create(mem_ctx);
- load->dest.is_ssa = true;
- nir_ssa_def_init(&load->instr, &load->dest.ssa, 1, NULL);
+ nir_load_const_instr *load = nir_load_const_instr_create(mem_ctx, 1);
switch (type) {
case nir_type_float:
- load->dest.ssa.name = ralloc_asprintf(mem_ctx, "%f", c->data.f);
+ load->def.name = ralloc_asprintf(mem_ctx, "%f", c->data.f);
load->value.f[0] = c->data.f;
break;
case nir_type_int:
- load->dest.ssa.name = ralloc_asprintf(mem_ctx, "%d", c->data.i);
+ load->def.name = ralloc_asprintf(mem_ctx, "%d", c->data.i);
load->value.i[0] = c->data.i;
break;
case nir_type_unsigned:
nir_alu_src val = {
.src.is_ssa = true,
- .src.ssa = &load->dest.ssa,
+ .src.ssa = &load->def,
.negate = false,
.abs = false,
.swizzle = { 0, 0, 0, 0 } /* Splatted scalar */
static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
- validate_dest(&instr->dest, state);
-
- if (instr->array_elems != 0) {
- assert(!instr->dest.is_ssa);
- assert(instr->dest.reg.base_offset + instr->array_elems <=
- instr->dest.reg.reg->num_array_elems);
- }
+ validate_ssa_def(&instr->def, state);
}
static void
void nir_emit_alu(nir_alu_instr *instr);
void nir_emit_intrinsic(nir_intrinsic_instr *instr);
void nir_emit_texture(nir_tex_instr *instr);
- void nir_emit_load_const(nir_load_const_instr *instr);
void nir_emit_jump(nir_jump_instr *instr);
fs_reg get_nir_src(nir_src src);
fs_reg get_nir_alu_src(nir_alu_instr *instr, unsigned src);
break;
case nir_instr_type_load_const:
- nir_emit_load_const(nir_instr_as_load_const(instr));
+ /* We can hit these, but we do nothing now and use them as
+ * immediates later.
+ */
break;
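/* Sketch of the consumer side (not part of this patch; assumes get_nir_src()
 * is where the immediates get materialized): an SSA source whose parent is a
 * load_const can be turned into an immediate at use time, e.g.
 *
 *    if (src.is_ssa &&
 *        src.ssa->parent_instr->type == nir_instr_type_load_const) {
 *       nir_load_const_instr *load =
 *          nir_instr_as_load_const(src.ssa->parent_instr);
 *       return fs_reg(load->value.u[0]);   // single-component case
 *    }
 */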
case nir_instr_type_jump:
emit_percomp(MOV(dest, this->result), (1 << num_components) - 1);
}
-void
-fs_visitor::nir_emit_load_const(nir_load_const_instr *instr)
-{
- /* Bail on SSA constant loads. These are used for immediates. */
- if (instr->dest.is_ssa)
- return;
-
- fs_reg dest = get_nir_dest(instr->dest);
- dest.type = BRW_REGISTER_TYPE_UD;
- if (instr->array_elems == 0) {
- for (unsigned i = 0; i < instr->num_components; i++) {
- emit(MOV(dest, fs_reg(instr->value.u[i])));
- dest.reg_offset++;
- }
- } else {
- for (unsigned i = 0; i < instr->array_elems; i++) {
- for (unsigned j = 0; j < instr->num_components; j++) {
- emit(MOV(dest, fs_reg(instr->array[i].u[j])));
- dest.reg_offset++;
- }
- }
- }
-}
-
void
fs_visitor::nir_emit_jump(nir_jump_instr *instr)
{