diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index ddbc249371b..ec6d09d5b6d 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -29,66 +29,31 @@
 /*
  * This lowering pass converts references to input/output variables with
  * loads/stores to actual input/output intrinsics.
- *
- * NOTE: This pass really only works for scalar backends at the moment due
- * to the way it packes the input/output data.
  */
 
 #include "nir.h"
+#include "nir_builder.h"
 
 struct lower_io_state {
+   nir_builder builder;
    void *mem_ctx;
+   int (*type_size)(const struct glsl_type *type);
+   nir_variable_mode mode;
 };
 
-static unsigned
-type_size(const struct glsl_type *type)
-{
-   unsigned int size, i;
-
-   switch (glsl_get_base_type(type)) {
-   case GLSL_TYPE_UINT:
-   case GLSL_TYPE_INT:
-   case GLSL_TYPE_FLOAT:
-   case GLSL_TYPE_BOOL:
-      return glsl_get_components(type);
-   case GLSL_TYPE_ARRAY:
-      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
-   case GLSL_TYPE_STRUCT:
-      size = 0;
-      for (i = 0; i < glsl_get_length(type); i++) {
-         size += type_size(glsl_get_struct_field(type, i));
-      }
-      return size;
-   case GLSL_TYPE_SAMPLER:
-      return 0;
-   case GLSL_TYPE_ATOMIC_UINT:
-      return 0;
-   case GLSL_TYPE_INTERFACE:
-      return 0;
-   case GLSL_TYPE_IMAGE:
-      return 0;
-   case GLSL_TYPE_VOID:
-   case GLSL_TYPE_ERROR:
-      unreachable("not reached");
-   }
-
-   return 0;
-}
-
-static void
-assign_var_locations(struct hash_table *ht, unsigned *size)
+void
+nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                         int (*type_size)(const struct glsl_type *))
 {
    unsigned location = 0;
 
-   struct hash_entry *entry;
-   hash_table_foreach(ht, entry) {
-      nir_variable *var = (nir_variable *) entry->data;
-
+   nir_foreach_variable(var, var_list) {
       /*
       * UBO's have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
-      if (var->data.mode == nir_var_uniform && var->interface_type != NULL)
+      if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
+          var->interface_type != NULL)
         continue;
 
       var->data.driver_location = location;
@@ -98,36 +63,54 @@ assign_var_locations(struct hash_table *ht, unsigned *size)
    *size = location;
 }
 
-static void
-assign_var_locations_shader(nir_shader *shader)
+/**
+ * Returns true if we're processing a stage whose inputs are arrays indexed
+ * by a vertex number (such as geometry shader inputs).
+ */
+static bool
+is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
 {
-   assign_var_locations(shader->inputs, &shader->num_inputs);
-   assign_var_locations(shader->outputs, &shader->num_outputs);
-   assign_var_locations(shader->uniforms, &shader->num_uniforms);
+   gl_shader_stage stage = state->builder.shader->stage;
+
+   return var->data.mode == nir_var_shader_in && !var->data.patch &&
+          (stage == MESA_SHADER_TESS_CTRL ||
+           stage == MESA_SHADER_TESS_EVAL ||
+           stage == MESA_SHADER_GEOMETRY);
 }
 
 static bool
-deref_has_indirect(nir_deref_var *deref)
+is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
 {
-   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
-      if (tail->deref_type == nir_deref_type_array) {
-         nir_deref_array *arr = nir_deref_as_array(tail);
-         if (arr->deref_array_type == nir_deref_array_type_indirect)
-            return true;
-      }
-   }
-
-   return false;
+   gl_shader_stage stage = state->builder.shader->stage;
+   return var->data.mode == nir_var_shader_out && !var->data.patch &&
+          stage == MESA_SHADER_TESS_CTRL;
 }
 
-static unsigned
-get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
-              struct lower_io_state *state)
+static nir_ssa_def *
+get_io_offset(nir_builder *b, nir_deref_var *deref,
+              nir_ssa_def **vertex_index,
+              int (*type_size)(const struct glsl_type *))
 {
-   bool found_indirect = false;
-   unsigned base_offset = 0;
-
    nir_deref *tail = &deref->deref;
+
+   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
+    * outermost array index separate.  Process the rest normally.
+    */
+   if (vertex_index != NULL) {
+      tail = tail->child;
+      assert(tail->deref_type == nir_deref_type_array);
+      nir_deref_array *deref_array = nir_deref_as_array(tail);
+
+      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
+      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
+      }
+      *vertex_index = vtx;
+   }
+
+   /* Just emit code and let constant-folding go to town */
+   nir_ssa_def *offset = nir_imm_int(b, 0);
+
    while (tail->child != NULL) {
       const struct glsl_type *parent_type = tail->type;
       tail = tail->child;
@@ -136,51 +119,51 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
          nir_deref_array *deref_array = nir_deref_as_array(tail);
          unsigned size = type_size(tail->type);
 
-         base_offset += size * deref_array->base_offset;
+         offset = nir_iadd(b, offset,
+                           nir_imm_int(b, size * deref_array->base_offset));
 
          if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
-            nir_load_const_instr *load_const =
-               nir_load_const_instr_create(state->mem_ctx, 1);
-            load_const->value.u[0] = size;
-            nir_instr_insert_before(instr, &load_const->instr);
-
-            nir_alu_instr *mul = nir_alu_instr_create(state->mem_ctx,
-                                                      nir_op_imul);
-            mul->src[0].src.is_ssa = true;
-            mul->src[0].src.ssa = &load_const->def;
-            nir_src_copy(&mul->src[1].src, &deref_array->indirect,
-                         state->mem_ctx);
-            mul->dest.write_mask = 1;
-            nir_ssa_dest_init(&mul->instr, &mul->dest.dest, 1, NULL);
-            nir_instr_insert_before(instr, &mul->instr);
-
-            if (found_indirect) {
-               nir_alu_instr *add = nir_alu_instr_create(state->mem_ctx,
-                                                         nir_op_iadd);
-               add->src[0].src = *indirect;
-               add->src[1].src.is_ssa = true;
-               add->src[1].src.ssa = &mul->dest.dest.ssa;
-               add->dest.write_mask = 1;
-               nir_ssa_dest_init(&add->instr, &add->dest.dest, 1, NULL);
-               nir_instr_insert_before(instr, &add->instr);
-
-               indirect->is_ssa = true;
-               indirect->ssa = &add->dest.dest.ssa;
-            } else {
-               indirect->is_ssa = true;
-               indirect->ssa = &mul->dest.dest.ssa;
-               found_indirect = true;
-            }
+            nir_ssa_def *mul =
+               nir_imul(b, nir_imm_int(b, size),
+                        nir_ssa_for_src(b, deref_array->indirect, 1));
+
+            offset = nir_iadd(b, offset, mul);
          }
       } else if (tail->deref_type == nir_deref_type_struct) {
          nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
 
-         for (unsigned i = 0; i < deref_struct->index; i++)
-            base_offset += type_size(glsl_get_struct_field(parent_type, i));
+         unsigned field_offset = 0;
+         for (unsigned i = 0; i < deref_struct->index; i++) {
+            field_offset += type_size(glsl_get_struct_field(parent_type, i));
+         }
+         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
       }
    }
 
-   return base_offset;
+   return offset;
+}
+
+static nir_intrinsic_op
+load_op(struct lower_io_state *state,
+        nir_variable_mode mode, bool per_vertex)
+{
+   nir_intrinsic_op op;
+   switch (mode) {
+   case nir_var_shader_in:
+      op = per_vertex ? nir_intrinsic_load_per_vertex_input :
+                        nir_intrinsic_load_input;
+      break;
+   case nir_var_shader_out:
+      op = per_vertex ? nir_intrinsic_load_per_vertex_output :
+                        nir_intrinsic_load_output;
+      break;
+   case nir_var_uniform:
+      op = nir_intrinsic_load_uniform;
+      break;
+   default:
+      unreachable("Unknown variable mode");
+   }
+   return op;
 }
 
 static bool
@@ -188,56 +171,61 @@ nir_lower_io_block(nir_block *block, void *void_state)
 {
    struct lower_io_state *state = void_state;
 
+   nir_builder *b = &state->builder;
+
    nir_foreach_instr_safe(block, instr) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
+      if (intrin->intrinsic != nir_intrinsic_load_var &&
+          intrin->intrinsic != nir_intrinsic_store_var)
+         continue;
+
+      nir_variable_mode mode = intrin->variables[0]->var->data.mode;
+
+      if (state->mode != nir_var_all && state->mode != mode)
+         continue;
+
+      if (mode != nir_var_shader_in &&
+          mode != nir_var_shader_out &&
+          mode != nir_var_uniform)
+         continue;
+
+      b->cursor = nir_before_instr(instr);
+
       switch (intrin->intrinsic) {
       case nir_intrinsic_load_var: {
-         nir_variable_mode mode = intrin->variables[0]->var->data.mode;
-         if (mode != nir_var_shader_in && mode != nir_var_uniform)
-            continue;
-
-         bool has_indirect = deref_has_indirect(intrin->variables[0]);
-
-         /* Figure out the opcode */
-         nir_intrinsic_op load_op;
-         switch (mode) {
-         case nir_var_shader_in:
-            load_op = has_indirect ? nir_intrinsic_load_input_indirect :
-                                     nir_intrinsic_load_input;
-            break;
-         case nir_var_uniform:
-            load_op = has_indirect ? nir_intrinsic_load_uniform_indirect :
-                                     nir_intrinsic_load_uniform;
-            break;
-         default:
-            unreachable("Unknown variable mode");
-         }
+         bool per_vertex =
+            is_per_vertex_input(state, intrin->variables[0]->var) ||
+            is_per_vertex_output(state, intrin->variables[0]->var);
 
-         nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx,
-                                                                load_op);
+         nir_ssa_def *offset;
+         nir_ssa_def *vertex_index;
+
+         offset = get_io_offset(b, intrin->variables[0],
+                                per_vertex ? &vertex_index : NULL,
+                                state->type_size);
+
+         nir_intrinsic_instr *load =
+            nir_intrinsic_instr_create(state->mem_ctx,
+                                       load_op(state, mode, per_vertex));
         load->num_components = intrin->num_components;
 
-         nir_src indirect;
-         unsigned offset = get_io_offset(intrin->variables[0],
-                                         &intrin->instr, &indirect, state);
-         offset += intrin->variables[0]->var->data.driver_location;
+         load->const_index[0] =
+            intrin->variables[0]->var->data.driver_location;
 
-         load->const_index[0] = offset;
-         load->const_index[1] = 1;
+         if (per_vertex)
+            load->src[0] = nir_src_for_ssa(vertex_index);
 
-         if (has_indirect)
-            load->src[0] = indirect;
+         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);
 
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
-                                     nir_src_for_ssa(&load->dest.ssa),
-                                     state->mem_ctx);
+                                     nir_src_for_ssa(&load->dest.ssa));
        } else {
           nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
        }
@@ -248,34 +236,35 @@ nir_lower_io_block(nir_block *block, void *void_state)
      }
 
       case nir_intrinsic_store_var: {
-         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
-            continue;
+         assert(mode == nir_var_shader_out);
 
-         bool has_indirect = deref_has_indirect(intrin->variables[0]);
+         nir_ssa_def *offset;
+         nir_ssa_def *vertex_index;
 
-         nir_intrinsic_op store_op;
-         if (has_indirect) {
-            store_op = nir_intrinsic_store_output_indirect;
-         } else {
-            store_op = nir_intrinsic_store_output;
-         }
+         bool per_vertex =
+            is_per_vertex_output(state, intrin->variables[0]->var);
+
+         offset = get_io_offset(b, intrin->variables[0],
+                                per_vertex ? &vertex_index : NULL,
+                                state->type_size);
+
+         nir_intrinsic_op store_op =
+            per_vertex ? nir_intrinsic_store_per_vertex_output :
+                         nir_intrinsic_store_output;
 
        nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx,
                                                                store_op);
        store->num_components = intrin->num_components;
 
-         nir_src indirect;
-         unsigned offset = get_io_offset(intrin->variables[0],
-                                         &intrin->instr, &indirect, state);
-         offset += intrin->variables[0]->var->data.driver_location;
+         nir_src_copy(&store->src[0], &intrin->src[0], store);
 
-         store->const_index[0] = offset;
-         store->const_index[1] = 1;
+         store->const_index[0] =
+            intrin->variables[0]->var->data.driver_location;
 
-         nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);
+         if (per_vertex)
+            store->src[1] = nir_src_for_ssa(vertex_index);
 
-         if (has_indirect)
-            store->src[1] = indirect;
+         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);
 
        nir_instr_insert_before(&intrin->instr, &store->instr);
        nir_instr_remove(&intrin->instr);
@@ -291,11 +280,16 @@ nir_lower_io_block(nir_block *block, void *void_state)
 }
 
 static void
-nir_lower_io_impl(nir_function_impl *impl)
+nir_lower_io_impl(nir_function_impl *impl,
+                  nir_variable_mode mode,
+                  int (*type_size)(const struct glsl_type *))
 {
    struct lower_io_state state;
 
+   nir_builder_init(&state.builder, impl);
    state.mem_ctx = ralloc_parent(impl);
+   state.mode = mode;
+   state.type_size = type_size;
 
    nir_foreach_block(impl, nir_lower_io_block, &state);
 
@@ -304,12 +298,53 @@ nir_lower_io_impl(nir_function_impl *impl)
 }
 
 void
-nir_lower_io(nir_shader *shader)
+nir_lower_io(nir_shader *shader, nir_variable_mode mode,
+             int (*type_size)(const struct glsl_type *))
 {
-   assign_var_locations_shader(shader);
-
    nir_foreach_overload(shader, overload) {
      if (overload->impl)
-         nir_lower_io_impl(overload->impl);
+         nir_lower_io_impl(overload->impl, mode, type_size);
+   }
+}
+
+/**
+ * Return the offset source for a load/store intrinsic.
+ */
+nir_src *
+nir_get_io_offset_src(nir_intrinsic_instr *instr)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_input:
+   case nir_intrinsic_load_output:
+   case nir_intrinsic_load_uniform:
+      return &instr->src[0];
+   case nir_intrinsic_load_ubo:
+   case nir_intrinsic_load_ssbo:
+   case nir_intrinsic_load_per_vertex_input:
+   case nir_intrinsic_load_per_vertex_output:
+   case nir_intrinsic_store_output:
+      return &instr->src[1];
+   case nir_intrinsic_store_ssbo:
+   case nir_intrinsic_store_per_vertex_output:
+      return &instr->src[2];
+   default:
+      return NULL;
+   }
+}
+
+/**
+ * Return the vertex index source for a load/store per_vertex intrinsic.
+ */
+nir_src *
+nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_per_vertex_input:
+   case nir_intrinsic_load_per_vertex_output:
+      return &instr->src[0];
+   case nir_intrinsic_store_per_vertex_output:
+      return &instr->src[1];
+   default:
+      return NULL;
    }
 }
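
Usage sketch (editor's illustration, not part of the diff): with type_size now
supplied by the caller, a scalar backend might drive the new entry points as
shown below. scalar_type_size() and lower_shader_io() are hypothetical names;
the callback simply mirrors the static type_size() helper this patch deletes
(with the old per-case zeros collapsed into the default), and it assumes the
shader's variables live in the shader->inputs/outputs/uniforms exec_lists that
nir_assign_var_locations() now walks.

static int
scalar_type_size(const struct glsl_type *type)
{
   /* Count scalar components, like the deleted built-in type_size(). */
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return glsl_get_components(type);
   case GLSL_TYPE_ARRAY:
      return scalar_type_size(glsl_get_array_element(type)) *
             glsl_get_length(type);
   case GLSL_TYPE_STRUCT: {
      int size = 0;
      for (unsigned i = 0; i < glsl_get_length(type); i++)
         size += scalar_type_size(glsl_get_struct_field(type, i));
      return size;
   }
   default:
      /* Samplers, images, atomics, and interface blocks take no slots. */
      return 0;
   }
}

static void
lower_shader_io(nir_shader *shader)
{
   /* Locations are now assigned explicitly, one variable list at a time. */
   nir_assign_var_locations(&shader->inputs, &shader->num_inputs,
                            scalar_type_size);
   nir_assign_var_locations(&shader->outputs, &shader->num_outputs,
                            scalar_type_size);
   nir_assign_var_locations(&shader->uniforms, &shader->num_uniforms,
                            scalar_type_size);

   /* The lowering itself can target a single mode, or everything via
    * nir_var_all.  Per-vertex loads/stores gain a vertex-index source,
    * which consumers can recover with nir_get_io_vertex_index_src().
    */
   nir_lower_io(shader, nir_var_all, scalar_type_size);
}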