diff --git a/src/compiler/nir/nir_gather_info.c b/src/compiler/nir/nir_gather_info.c
index 7eaa4c27c1f..d902fb7b69e 100644
--- a/src/compiler/nir/nir_gather_info.c
+++ b/src/compiler/nir/nir_gather_info.c
@@ -22,11 +22,54 @@
  */
 
 #include "nir.h"
+#include "nir_deref.h"
 #include "main/menums.h"
 
+static void
+get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
+               bool *cross_invocation, bool *indirect)
+{
+   *cross_invocation = false;
+   *indirect = false;
+
+   const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
+
+   nir_deref_path path;
+   nir_deref_path_init(&path, deref, NULL);
+   assert(path.path[0]->deref_type == nir_deref_type_var);
+   nir_deref_instr **p = &path.path[1];
+
+   /* Vertex index is the outermost array index. */
+   if (per_vertex) {
+      assert((*p)->deref_type == nir_deref_type_array);
+      nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
+      *cross_invocation =
+         vertex_index_instr->type != nir_instr_type_intrinsic ||
+         nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
+         nir_intrinsic_load_invocation_id;
+      p++;
+   }
+
+   /* We always lower indirect dereferences for "compact" array vars. */
+   if (!path.path[0]->var->data.compact) {
+      /* Non-compact array vars: find out if they are indirect. */
+      for (; *p; p++) {
+         if ((*p)->deref_type == nir_deref_type_array) {
+            *indirect |= !nir_src_is_const((*p)->arr.index);
+         } else if ((*p)->deref_type == nir_deref_type_struct) {
+            /* Struct indices are always constant. */
+         } else {
+            unreachable("Unsupported deref type");
+         }
+      }
+   }
+
+   nir_deref_path_finish(&path);
+}
+
 static void
 set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
-            bool is_output_read)
+            nir_deref_instr *deref, bool is_output_read)
 {
    for (int i = 0; i < len; i++) {
       assert(var->data.location != -1);
@@ -48,11 +91,23 @@ set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
          bitfield = BITFIELD64_BIT(idx);
       }
 
+      bool cross_invocation;
+      bool indirect;
+      get_deref_info(shader, var, deref, &cross_invocation, &indirect);
+
       if (var->data.mode == nir_var_shader_in) {
-         if (is_patch_generic)
+         if (is_patch_generic) {
             shader->info.patch_inputs_read |= bitfield;
-         else
+            if (indirect)
+               shader->info.patch_inputs_read_indirectly |= bitfield;
+         } else {
             shader->info.inputs_read |= bitfield;
+            if (indirect)
+               shader->info.inputs_read_indirectly |= bitfield;
+         }
+
+         if (cross_invocation)
+            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;
 
          if (shader->info.stage == MESA_SHADER_FRAGMENT) {
             shader->info.fs.uses_sample_qualifier |= var->data.sample;
@@ -62,16 +117,27 @@ set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
       if (is_output_read) {
          if (is_patch_generic) {
             shader->info.patch_outputs_read |= bitfield;
+            if (indirect)
+               shader->info.patch_outputs_accessed_indirectly |= bitfield;
          } else {
             shader->info.outputs_read |= bitfield;
+            if (indirect)
+               shader->info.outputs_accessed_indirectly |= bitfield;
          }
+
+         if (cross_invocation)
+            shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
       } else {
-         if (is_patch_generic) {
-            shader->info.patch_outputs_written |= bitfield;
-         } else if (!var->data.read_only) {
-            shader->info.outputs_written |= bitfield;
-         }
-      }
+         if (is_patch_generic) {
+            shader->info.patch_outputs_written |= bitfield;
+            if (indirect)
+               shader->info.patch_outputs_accessed_indirectly |= bitfield;
+         } else if (!var->data.read_only) {
+            shader->info.outputs_written |= bitfield;
+            if (indirect)
+               shader->info.outputs_accessed_indirectly |= bitfield;
+         }
+      }
 
       if (var->data.fb_fetch_output)
@@ -85,7 +151,8 @@ set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
  * represents a shader input or output.
  */
 static void
-mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
+mark_whole_variable(nir_shader *shader, nir_variable *var,
+                    nir_deref_instr *deref, bool is_output_read)
 {
    const struct glsl_type *type = var->type;
 
@@ -98,16 +165,19 @@ mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
    const unsigned slots =
       var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);
 
-   set_io_mask(shader, var, 0, slots, is_output_read);
+   set_io_mask(shader, var, 0, slots, deref, is_output_read);
 }
 
 static unsigned
-get_io_offset(nir_deref_instr *deref, bool is_vertex_input)
+get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
 {
    unsigned offset = 0;
 
    for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
       if (d->deref_type == nir_deref_type_array) {
+         if (per_vertex && nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
+            break;
+
          if (!nir_src_is_const(d->arr.index))
            return -1;
 
@@ -132,8 +202,9 @@ try_mask_partial_io(nir_shader *shader, nir_variable *var,
                     nir_deref_instr *deref, bool is_output_read)
 {
    const struct glsl_type *type = var->type;
+   bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
 
-   if (nir_is_per_vertex_io(var, shader->info.stage)) {
+   if (per_vertex) {
       assert(glsl_type_is_array(type));
       type = glsl_get_array_element(type);
    }
@@ -157,7 +228,7 @@ try_mask_partial_io(nir_shader *shader, nir_variable *var,
       return false;
    }
 
-   unsigned offset = get_io_offset(deref, false);
+   unsigned offset = get_io_offset(deref, false, per_vertex);
    if (offset == -1)
       return false;
 
@@ -189,7 +260,7 @@ try_mask_partial_io(nir_shader *shader, nir_variable *var,
       return false;
    }
 
-   set_io_mask(shader, var, offset, elem_width, is_output_read);
+   set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
 
    return true;
 }
@@ -198,6 +269,10 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                       void *dead_ctx)
 {
    switch (instr->intrinsic) {
+   case nir_intrinsic_demote:
+   case nir_intrinsic_demote_if:
+      shader->info.fs.uses_demote = true;
+   /* fallthrough: quads with helper lanes only might be discarded entirely */
    case nir_intrinsic_discard:
    case nir_intrinsic_discard_if:
       assert(shader->info.stage == MESA_SHADER_FRAGMENT);
@@ -207,6 +282,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
    case nir_intrinsic_interp_deref_at_centroid:
    case nir_intrinsic_interp_deref_at_sample:
    case nir_intrinsic_interp_deref_at_offset:
+   case nir_intrinsic_interp_deref_at_vertex:
    case nir_intrinsic_load_deref:
    case nir_intrinsic_store_deref: {
       nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
@@ -219,7 +295,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
          is_output_read = true;
 
       if (!try_mask_partial_io(shader, var, deref, is_output_read))
-         mark_whole_variable(shader, var, is_output_read);
+         mark_whole_variable(shader, var, deref, is_output_read);
 
       /* We need to track which input_reads bits correspond to a
        * dvec3/dvec4 input attribute */
@@ -237,6 +313,7 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
 
    case nir_intrinsic_load_draw_id:
    case nir_intrinsic_load_frag_coord:
+   case nir_intrinsic_load_point_coord:
    case nir_intrinsic_load_front_face:
    case nir_intrinsic_load_vertex_id:
    case nir_intrinsic_load_vertex_id_zero_base:
@@ -262,6 +339,14 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
          (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
       break;
 
+   case nir_intrinsic_quad_broadcast:
+   case nir_intrinsic_quad_swap_horizontal:
+   case nir_intrinsic_quad_swap_vertical:
+   case nir_intrinsic_quad_swap_diagonal:
+      if (shader->info.stage == MESA_SHADER_FRAGMENT)
+         shader->info.fs.needs_helper_invocations = true;
+      break;
+
    case nir_intrinsic_end_primitive:
    case nir_intrinsic_end_primitive_with_counter:
       assert(shader->info.stage == MESA_SHADER_GEOMETRY);
@@ -269,11 +354,105 @@
       /* fall through */
 
    case nir_intrinsic_emit_vertex:
+   case nir_intrinsic_emit_vertex_with_counter:
       if (nir_intrinsic_stream_id(instr) > 0)
          shader->info.gs.uses_streams = true;
 
       break;
 
+   case nir_intrinsic_bindless_image_atomic_add:
+   case nir_intrinsic_bindless_image_atomic_and:
+   case nir_intrinsic_bindless_image_atomic_comp_swap:
+   case nir_intrinsic_bindless_image_atomic_dec_wrap:
+   case nir_intrinsic_bindless_image_atomic_exchange:
+   case nir_intrinsic_bindless_image_atomic_fadd:
+   case nir_intrinsic_bindless_image_atomic_imax:
+   case nir_intrinsic_bindless_image_atomic_imin:
+   case nir_intrinsic_bindless_image_atomic_inc_wrap:
+   case nir_intrinsic_bindless_image_atomic_or:
+   case nir_intrinsic_bindless_image_atomic_umax:
+   case nir_intrinsic_bindless_image_atomic_umin:
+   case nir_intrinsic_bindless_image_atomic_xor:
+   case nir_intrinsic_bindless_image_store:
+   case nir_intrinsic_bindless_image_store_raw_intel:
+   case nir_intrinsic_global_atomic_add:
+   case nir_intrinsic_global_atomic_and:
+   case nir_intrinsic_global_atomic_comp_swap:
+   case nir_intrinsic_global_atomic_exchange:
+   case nir_intrinsic_global_atomic_fadd:
+   case nir_intrinsic_global_atomic_fcomp_swap:
+   case nir_intrinsic_global_atomic_fmax:
+   case nir_intrinsic_global_atomic_fmin:
+   case nir_intrinsic_global_atomic_imax:
+   case nir_intrinsic_global_atomic_imin:
+   case nir_intrinsic_global_atomic_or:
+   case nir_intrinsic_global_atomic_umax:
+   case nir_intrinsic_global_atomic_umin:
+   case nir_intrinsic_global_atomic_xor:
+   case nir_intrinsic_image_atomic_add:
+   case nir_intrinsic_image_atomic_and:
+   case nir_intrinsic_image_atomic_comp_swap:
+   case nir_intrinsic_image_atomic_dec_wrap:
+   case nir_intrinsic_image_atomic_exchange:
+   case nir_intrinsic_image_atomic_fadd:
+   case nir_intrinsic_image_atomic_imax:
+   case nir_intrinsic_image_atomic_imin:
+   case nir_intrinsic_image_atomic_inc_wrap:
+   case nir_intrinsic_image_atomic_or:
+   case nir_intrinsic_image_atomic_umax:
+   case nir_intrinsic_image_atomic_umin:
+   case nir_intrinsic_image_atomic_xor:
+   case nir_intrinsic_image_deref_atomic_add:
+   case nir_intrinsic_image_deref_atomic_and:
+   case nir_intrinsic_image_deref_atomic_comp_swap:
+   case nir_intrinsic_image_deref_atomic_dec_wrap:
+   case nir_intrinsic_image_deref_atomic_exchange:
+   case nir_intrinsic_image_deref_atomic_fadd:
+   case nir_intrinsic_image_deref_atomic_imax:
+   case nir_intrinsic_image_deref_atomic_imin:
+   case nir_intrinsic_image_deref_atomic_inc_wrap:
+   case nir_intrinsic_image_deref_atomic_or:
+   case nir_intrinsic_image_deref_atomic_umax:
+   case nir_intrinsic_image_deref_atomic_umin:
+   case nir_intrinsic_image_deref_atomic_xor:
+   case nir_intrinsic_image_deref_store:
+   case nir_intrinsic_image_deref_store_raw_intel:
+   case nir_intrinsic_image_store:
+   case nir_intrinsic_image_store_raw_intel:
+   case nir_intrinsic_ssbo_atomic_add:
+   case nir_intrinsic_ssbo_atomic_add_ir3:
+   case nir_intrinsic_ssbo_atomic_and:
+   case nir_intrinsic_ssbo_atomic_and_ir3:
+   case nir_intrinsic_ssbo_atomic_comp_swap:
+   case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
+   case nir_intrinsic_ssbo_atomic_exchange:
+   case nir_intrinsic_ssbo_atomic_exchange_ir3:
+   case nir_intrinsic_ssbo_atomic_fadd:
+   case nir_intrinsic_ssbo_atomic_fcomp_swap:
+   case nir_intrinsic_ssbo_atomic_fmax:
+   case nir_intrinsic_ssbo_atomic_fmin:
+   case nir_intrinsic_ssbo_atomic_imax:
+   case nir_intrinsic_ssbo_atomic_imax_ir3:
+   case nir_intrinsic_ssbo_atomic_imin:
+   case nir_intrinsic_ssbo_atomic_imin_ir3:
+   case nir_intrinsic_ssbo_atomic_or:
+   case nir_intrinsic_ssbo_atomic_or_ir3:
+   case nir_intrinsic_ssbo_atomic_umax:
+   case nir_intrinsic_ssbo_atomic_umax_ir3:
+   case nir_intrinsic_ssbo_atomic_umin:
+   case nir_intrinsic_ssbo_atomic_umin_ir3:
+   case nir_intrinsic_ssbo_atomic_xor:
+   case nir_intrinsic_ssbo_atomic_xor_ir3:
+   case nir_intrinsic_store_global:
+   case nir_intrinsic_store_global_ir3:
+   case nir_intrinsic_store_ssbo:
+   case nir_intrinsic_store_ssbo_ir3:
+      /* Only set this for globally visible memory, not scratch and not
+       * shared.
+       */
+      shader->info.writes_memory = true;
+      break;
+
    default:
       break;
    }
@@ -282,17 +461,14 @@ gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
 static void
 gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
 {
+   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
+       nir_tex_instr_has_implicit_derivative(instr))
+      shader->info.fs.needs_helper_invocations = true;
+
    switch (instr->op) {
    case nir_texop_tg4:
       shader->info.uses_texture_gather = true;
       break;
-   case nir_texop_txf:
-   case nir_texop_txf_ms:
-   case nir_texop_txf_ms_mcs:
-      shader->info.textures_used_by_txf |=
-         ((1 << MAX2(instr->texture_array_size, 1)) - 1) <<
-         instr->texture_index;
-      break;
    default:
       break;
    }
@@ -305,10 +481,23 @@ gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
    case nir_op_fddx:
    case nir_op_fddy:
       shader->info.uses_fddx_fddy = true;
+      /* Fall through */
+   case nir_op_fddx_fine:
+   case nir_op_fddy_fine:
+   case nir_op_fddx_coarse:
+   case nir_op_fddy_coarse:
+      if (shader->info.stage == MESA_SHADER_FRAGMENT)
+         shader->info.fs.needs_helper_invocations = true;
       break;
    default:
       break;
    }
+
+   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
+   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
+   for (unsigned i = 0; i < num_srcs; i++) {
+      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
+   }
 }
 
 static void
@@ -334,56 +523,25 @@ gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
    }
 }
 
-static unsigned
-glsl_type_get_sampler_count(const struct glsl_type *type)
-{
-   if (glsl_type_is_array(type)) {
-      return (glsl_get_aoa_size(type) *
-              glsl_type_get_sampler_count(glsl_without_array(type)));
-   }
-
-   if (glsl_type_is_struct(type)) {
-      unsigned count = 0;
-      for (int i = 0; i < glsl_get_length(type); i++)
-         count += glsl_type_get_sampler_count(glsl_get_struct_field(type, i));
-      return count;
-   }
-
-   if (glsl_type_is_sampler(type))
-      return 1;
-
-   return 0;
-}
-
-static unsigned
-glsl_type_get_image_count(const struct glsl_type *type)
-{
-   if (glsl_type_is_array(type)) {
-      return (glsl_get_aoa_size(type) *
-              glsl_type_get_image_count(glsl_without_array(type)));
-   }
-
-   if (glsl_type_is_struct(type)) {
-      unsigned count = 0;
-      for (int i = 0; i < glsl_get_length(type); i++)
-         count += glsl_type_get_image_count(glsl_get_struct_field(type, i));
-      return count;
-   }
-
-   if (glsl_type_is_image(type))
-      return 1;
-
-   return 0;
-}
-
 void
 nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
 {
    shader->info.num_textures = 0;
    shader->info.num_images = 0;
+   shader->info.last_msaa_image = -1;
+
    nir_foreach_variable(var, &shader->uniforms) {
+      /* Bindless textures and images don't use non-bindless slots. */
+      if (var->data.bindless)
+         continue;
+
       shader->info.num_textures += glsl_type_get_sampler_count(var->type);
       shader->info.num_images += glsl_type_get_image_count(var->type);
+
+      /* Assuming image slots don't have holes (e.g. OpenGL) */
+      if (glsl_type_is_image(var->type) &&
+          glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_MS)
+         shader->info.last_msaa_image = shader->info.num_images - 1;
    }
 
    shader->info.inputs_read = 0;
@@ -393,13 +551,27 @@ nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
    shader->info.patch_inputs_read = 0;
    shader->info.patch_outputs_written = 0;
    shader->info.system_values_read = 0;
+   shader->info.inputs_read_indirectly = 0;
+   shader->info.outputs_accessed_indirectly = 0;
+   shader->info.patch_inputs_read_indirectly = 0;
+   shader->info.patch_outputs_accessed_indirectly = 0;
+
    if (shader->info.stage == MESA_SHADER_VERTEX) {
       shader->info.vs.double_inputs = 0;
    }
    if (shader->info.stage == MESA_SHADER_FRAGMENT) {
       shader->info.fs.uses_sample_qualifier = false;
+      shader->info.fs.uses_discard = false;
+      shader->info.fs.uses_demote = false;
+      shader->info.fs.needs_helper_invocations = false;
+   }
+   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
+      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
+      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
    }
 
+   shader->info.writes_memory = shader->info.has_transform_feedback_varyings;
+
    void *dead_ctx = ralloc_context(NULL);
    nir_foreach_block(block, entrypoint) {
       gather_info_block(block, shader, dead_ctx);
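
The classification that get_deref_info() performs in this patch reduces to two rules: an access is cross-invocation when the outermost per-vertex array index is anything other than a load of the invocation ID, and indirect when any remaining array index is not a compile-time constant. The toy program below is a minimal, self-contained sketch of just that decision logic; it is not mesa code, all names in it are illustrative, and the compact-variable special case is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Kinds of array index a toy deref step can carry. */
enum toy_index { IDX_INVOCATION_ID, IDX_CONSTANT, IDX_DYNAMIC };

struct toy_access {
   bool per_vertex;           /* TCS-style arrayed I/O? */
   enum toy_index vertex_idx; /* outermost (per-vertex) array index */
   int num_inner;             /* number of inner array steps */
   enum toy_index inner[4];   /* index kind of each inner step */
};

static void
classify(const struct toy_access *a, bool *cross_invocation, bool *indirect)
{
   /* Mirrors the rule in get_deref_info(): anything other than the
    * invocation ID on the outermost per-vertex index is cross-invocation. */
   *cross_invocation = a->per_vertex && a->vertex_idx != IDX_INVOCATION_ID;

   /* Any non-constant inner array index makes the access indirect. */
   *indirect = false;
   for (int i = 0; i < a->num_inner; i++)
      *indirect |= a->inner[i] == IDX_DYNAMIC;
}

int
main(void)
{
   /* e.g. gl_out[gl_InvocationID].foo[2]: same invocation, direct. */
   struct toy_access direct = { true, IDX_INVOCATION_ID, 1, { IDX_CONSTANT } };
   /* e.g. gl_out[(gl_InvocationID + 1) % 3].foo[i]: cross-invocation, indirect. */
   struct toy_access cross = { true, IDX_DYNAMIC, 1, { IDX_DYNAMIC } };

   bool ci, ind;
   classify(&direct, &ci, &ind);
   printf("direct access:  cross=%d indirect=%d\n", ci, ind);
   classify(&cross, &ci, &ind);
   printf("dynamic access: cross=%d indirect=%d\n", ci, ind);
   return 0;
}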
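
On the consumer side, a backend can combine the new masks to decide whether tessellation-control I/O has to live in shared storage at all. The helper below is a hedged sketch: tcs_can_keep_outputs_in_registers() is a hypothetical function that no driver in this patch defines, but the nir_shader_info fields it reads are exactly the ones the diff introduces.

#include "nir.h"

/* Hypothetical backend query: if no invocation reads another invocation's
 * I/O and nothing is addressed indirectly, each TCS invocation only touches
 * its own slots at constant offsets, so outputs never have to round-trip
 * through on-chip memory and can stay in registers. */
static bool
tcs_can_keep_outputs_in_registers(const nir_shader *tcs)
{
   assert(tcs->info.stage == MESA_SHADER_TESS_CTRL);
   return tcs->info.tess.tcs_cross_invocation_inputs_read == 0 &&
          tcs->info.tess.tcs_cross_invocation_outputs_read == 0 &&
          tcs->info.outputs_accessed_indirectly == 0 &&
          tcs->info.patch_outputs_accessed_indirectly == 0;
}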