diff --git a/src/glsl/nir/nir_lower_io.c b/src/glsl/nir/nir_lower_io.c
index 30fad855e6f..ec6d09d5b6d 100644
--- a/src/glsl/nir/nir_lower_io.c
+++ b/src/glsl/nir/nir_lower_io.c
@@ -63,80 +63,102 @@ nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
    *size = location;
 }
 
+/**
+ * Returns true if we're processing a stage whose inputs are arrays indexed
+ * by a vertex number (such as geometry shader inputs).
+ */
 static bool
-deref_has_indirect(nir_deref_var *deref)
+is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
 {
-   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
-      if (tail->deref_type == nir_deref_type_array) {
-         nir_deref_array *arr = nir_deref_as_array(tail);
-         if (arr->deref_array_type == nir_deref_array_type_indirect)
-            return true;
-      }
-   }
+   gl_shader_stage stage = state->builder.shader->stage;
 
-   return false;
+   return var->data.mode == nir_var_shader_in && !var->data.patch &&
+          (stage == MESA_SHADER_TESS_CTRL ||
+           stage == MESA_SHADER_TESS_EVAL ||
+           stage == MESA_SHADER_GEOMETRY);
 }
 
-static unsigned
-get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
-              struct lower_io_state *state)
+static bool
+is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
 {
-   bool found_indirect = false;
-   unsigned base_offset = 0;
-
-   nir_builder *b = &state->builder;
-   b->cursor = nir_before_instr(instr);
+   gl_shader_stage stage = state->builder.shader->stage;
+   return var->data.mode == nir_var_shader_out && !var->data.patch &&
+          stage == MESA_SHADER_TESS_CTRL;
+}
 
+static nir_ssa_def *
+get_io_offset(nir_builder *b, nir_deref_var *deref,
+              nir_ssa_def **vertex_index,
+              int (*type_size)(const struct glsl_type *))
+{
    nir_deref *tail = &deref->deref;
+
+   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
+    * outermost array index separate.  Process the rest normally.
+    */
+   if (vertex_index != NULL) {
+      tail = tail->child;
+      assert(tail->deref_type == nir_deref_type_array);
+      nir_deref_array *deref_array = nir_deref_as_array(tail);
+
+      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
+      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
+      }
+      *vertex_index = vtx;
+   }
+
+   /* Just emit code and let constant-folding go to town */
+   nir_ssa_def *offset = nir_imm_int(b, 0);
+
    while (tail->child != NULL) {
       const struct glsl_type *parent_type = tail->type;
       tail = tail->child;
 
       if (tail->deref_type == nir_deref_type_array) {
          nir_deref_array *deref_array = nir_deref_as_array(tail);
-         unsigned size = state->type_size(tail->type);
+         unsigned size = type_size(tail->type);
 
-         base_offset += size * deref_array->base_offset;
+         offset = nir_iadd(b, offset,
+                           nir_imm_int(b, size * deref_array->base_offset));
 
          if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
             nir_ssa_def *mul =
                nir_imul(b, nir_imm_int(b, size),
                         nir_ssa_for_src(b, deref_array->indirect, 1));
 
-            if (found_indirect) {
-               indirect->ssa =
-                  nir_iadd(b, nir_ssa_for_src(b, *indirect, 1), mul);
-            } else {
-               indirect->ssa = mul;
-            }
-            indirect->is_ssa = true;
-            found_indirect = true;
+            offset = nir_iadd(b, offset, mul);
          }
       } else if (tail->deref_type == nir_deref_type_struct) {
          nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
 
+         unsigned field_offset = 0;
          for (unsigned i = 0; i < deref_struct->index; i++) {
-            base_offset +=
-               state->type_size(glsl_get_struct_field(parent_type, i));
+            field_offset += type_size(glsl_get_struct_field(parent_type, i));
          }
+         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
       }
    }
 
-   return base_offset;
+   return offset;
 }
 
 static nir_intrinsic_op
-load_op(nir_variable_mode mode, bool has_indirect)
+load_op(struct lower_io_state *state,
+        nir_variable_mode mode, bool per_vertex)
 {
    nir_intrinsic_op op;
    switch (mode) {
    case nir_var_shader_in:
-      op = has_indirect ? nir_intrinsic_load_input_indirect :
-                          nir_intrinsic_load_input;
+      op = per_vertex ? nir_intrinsic_load_per_vertex_input :
+                        nir_intrinsic_load_input;
+      break;
+   case nir_var_shader_out:
+      op = per_vertex ? nir_intrinsic_load_per_vertex_output :
+                        nir_intrinsic_load_output;
       break;
    case nir_var_uniform:
-      op = has_indirect ? nir_intrinsic_load_uniform_indirect :
-                          nir_intrinsic_load_uniform;
+      op = nir_intrinsic_load_uniform;
       break;
    default:
       unreachable("Unknown variable mode");
@@ -149,6 +171,8 @@ nir_lower_io_block(nir_block *block, void *void_state)
 {
    struct lower_io_state *state = void_state;
 
+   nir_builder *b = &state->builder;
+
    nir_foreach_instr_safe(block, instr) {
       if (instr->type != nir_instr_type_intrinsic)
          continue;
@@ -161,35 +185,41 @@ nir_lower_io_block(nir_block *block, void *void_state)
 
       nir_variable_mode mode = intrin->variables[0]->var->data.mode;
 
-      if (state->mode != -1 && state->mode != mode)
+      if (state->mode != nir_var_all && state->mode != mode)
         continue;
 
+      if (mode != nir_var_shader_in &&
+          mode != nir_var_shader_out &&
+          mode != nir_var_uniform)
+         continue;
+
+      b->cursor = nir_before_instr(instr);
+
       switch (intrin->intrinsic) {
       case nir_intrinsic_load_var: {
-         if (mode != nir_var_shader_in && mode != nir_var_uniform)
-            continue;
+         bool per_vertex =
+            is_per_vertex_input(state, intrin->variables[0]->var) ||
+            is_per_vertex_output(state, intrin->variables[0]->var);
 
-         bool has_indirect = deref_has_indirect(intrin->variables[0]);
+         nir_ssa_def *offset;
+         nir_ssa_def *vertex_index;
+
+         offset = get_io_offset(b, intrin->variables[0],
+                                per_vertex ? &vertex_index : NULL,
+                                state->type_size);
 
          nir_intrinsic_instr *load =
             nir_intrinsic_instr_create(state->mem_ctx,
-                                       load_op(mode, has_indirect));
+                                       load_op(state, mode, per_vertex));
         load->num_components = intrin->num_components;
 
-         nir_src indirect;
-         unsigned offset = get_io_offset(intrin->variables[0],
-                                         &intrin->instr, &indirect, state);
+         load->const_index[0] =
+            intrin->variables[0]->var->data.driver_location;
 
-         unsigned location = intrin->variables[0]->var->data.driver_location;
-         if (mode == nir_var_uniform) {
-            load->const_index[0] = location;
-            load->const_index[1] = offset;
-         } else {
-            load->const_index[0] = location + offset;
-         }
+         if (per_vertex)
+            load->src[0] = nir_src_for_ssa(vertex_index);
 
-         if (has_indirect)
-            load->src[0] = indirect;
+         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);
 
          if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
@@ -206,33 +236,35 @@ nir_lower_io_block(nir_block *block, void *void_state)
       }
 
       case nir_intrinsic_store_var: {
-         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
-            continue;
+         assert(mode == nir_var_shader_out);
 
-         bool has_indirect = deref_has_indirect(intrin->variables[0]);
+         nir_ssa_def *offset;
+         nir_ssa_def *vertex_index;
 
-         nir_intrinsic_op store_op;
-         if (has_indirect) {
-            store_op = nir_intrinsic_store_output_indirect;
-         } else {
-            store_op = nir_intrinsic_store_output;
-         }
+         bool per_vertex =
+            is_per_vertex_output(state, intrin->variables[0]->var);
+
+         offset = get_io_offset(b, intrin->variables[0],
+                                per_vertex ? &vertex_index : NULL,
+                                state->type_size);
+
+         nir_intrinsic_op store_op =
+            per_vertex ? nir_intrinsic_store_per_vertex_output :
+                         nir_intrinsic_store_output;
 
          nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx, store_op);
          store->num_components = intrin->num_components;
 
-         nir_src indirect;
-         unsigned offset = get_io_offset(intrin->variables[0],
-                                         &intrin->instr, &indirect, state);
-         offset += intrin->variables[0]->var->data.driver_location;
+         nir_src_copy(&store->src[0], &intrin->src[0], store);
 
-         store->const_index[0] = offset;
+         store->const_index[0] =
+            intrin->variables[0]->var->data.driver_location;
 
-         nir_src_copy(&store->src[0], &intrin->src[0], store);
+         if (per_vertex)
+            store->src[1] = nir_src_for_ssa(vertex_index);
 
-         if (has_indirect)
-            store->src[1] = indirect;
+         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);
 
          nir_instr_insert_before(&intrin->instr, &store->instr);
          nir_instr_remove(&intrin->instr);
@@ -274,3 +306,45 @@ nir_lower_io(nir_shader *shader, nir_variable_mode mode,
          nir_lower_io_impl(overload->impl, mode, type_size);
    }
 }
+
+/**
+ * Return the offset source for a load/store intrinsic.
+ */
+nir_src *
+nir_get_io_offset_src(nir_intrinsic_instr *instr)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_input:
+   case nir_intrinsic_load_output:
+   case nir_intrinsic_load_uniform:
+      return &instr->src[0];
+   case nir_intrinsic_load_ubo:
+   case nir_intrinsic_load_ssbo:
+   case nir_intrinsic_load_per_vertex_input:
+   case nir_intrinsic_load_per_vertex_output:
+   case nir_intrinsic_store_output:
+      return &instr->src[1];
+   case nir_intrinsic_store_ssbo:
+   case nir_intrinsic_store_per_vertex_output:
+      return &instr->src[2];
+   default:
+      return NULL;
+   }
+}
+
+/**
+ * Return the vertex index source for a load/store per_vertex intrinsic.
+ */
+nir_src *
+nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
+{
+   switch (instr->intrinsic) {
+   case nir_intrinsic_load_per_vertex_input:
+   case nir_intrinsic_load_per_vertex_output:
+      return &instr->src[0];
+   case nir_intrinsic_store_per_vertex_output:
+      return &instr->src[1];
+   default:
+      return NULL;
+   }
+}
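To make the new lowering strategy concrete: get_io_offset() no longer folds a constant base into const_index with a separate indirect source; it emits a single SSA expression per deref chain and relies on NIR constant folding to clean up afterwards. A rough sketch of the expression it builds for a deref like s.field[i], assuming a made-up field offset of 16 and a 4-unit element size under the type_size() callback (example_io_offset is illustrative, not part of the patch):

/* Mirrors the iadd/imul chain get_io_offset() emits for s.field[i];
 * the constant-only adds fold away during later optimization.
 */
static nir_ssa_def *
example_io_offset(nir_builder *b, nir_ssa_def *i)
{
   nir_ssa_def *offset = nir_imm_int(b, 0);
   offset = nir_iadd(b, offset, nir_imm_int(b, 16));      /* struct field  */
   offset = nir_iadd(b, offset, nir_imm_int(b, 4 * 0));   /* base_offset 0 */
   offset = nir_iadd(b, offset,
                     nir_imul(b, nir_imm_int(b, 4), i));  /* indirect i    */
   return offset;
}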
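Backends can then pick the lowered intrinsics apart with the two accessors added at the end of the file instead of hard-coding source positions. A minimal consumption sketch, assuming a hypothetical backend helper emit_indexed_load(); only the nir_get_io_*_src() accessors come from this patch:

void emit_indexed_load(unsigned base, nir_src *offset, nir_src *vertex);

static void
backend_visit_load(nir_intrinsic_instr *intrin)
{
   /* After lowering, const_index[0] holds the variable's driver_location. */
   unsigned base = intrin->const_index[0];

   /* The SSA offset source, in units of the type_size() callback that was
    * passed to nir_lower_io().
    */
   nir_src *offset = nir_get_io_offset_src(intrin);

   /* NULL unless this is a load/store_per_vertex_* intrinsic. */
   nir_src *vertex = nir_get_io_vertex_index_src(intrin);

   emit_indexed_load(base, offset, vertex);
}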