X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_shader_info.c;h=2c8d4e0511fc57f2f1a608c1c0a7c41e29820fea;hp=60e0cd22fb0c24cc685219545b16d58edc3b4807;hb=00973542ffe93224949ca9654757f8b0581e2ef9;hpb=7c50214aab0b590059fea15e4b7550cfa99855c2

diff --git a/src/amd/vulkan/radv_shader_info.c b/src/amd/vulkan/radv_shader_info.c
index 60e0cd22fb0..2c8d4e0511f 100644
--- a/src/amd/vulkan/radv_shader_info.c
+++ b/src/amd/vulkan/radv_shader_info.c
@@ -151,6 +151,15 @@ set_output_usage_mask(const nir_shader *nir, const nir_intrinsic_instr *instr,
 			((wrmask >> (i * 4)) & 0xf) << comp;
 }
 
+static void
+set_writes_memory(const nir_shader *nir, struct radv_shader_info *info)
+{
+	if (nir->info.stage == MESA_SHADER_FRAGMENT)
+		info->ps.writes_memory = true;
+	else if (nir->info.stage == MESA_SHADER_GEOMETRY)
+		info->gs.writes_memory = true;
+}
+
 static void
 gather_intrinsic_store_deref_info(const nir_shader *nir,
 				   const nir_intrinsic_instr *instr,
@@ -308,10 +317,7 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
 		    instr->intrinsic == nir_intrinsic_image_deref_atomic_xor ||
 		    instr->intrinsic == nir_intrinsic_image_deref_atomic_exchange ||
 		    instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
-			if (nir->info.stage == MESA_SHADER_FRAGMENT)
-				info->ps.writes_memory = true;
-			else if (nir->info.stage == MESA_SHADER_GEOMETRY)
-				info->gs.writes_memory = true;
+			set_writes_memory(nir, info);
 		}
 		break;
 	}
@@ -326,17 +332,28 @@ gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
 	case nir_intrinsic_ssbo_atomic_xor:
 	case nir_intrinsic_ssbo_atomic_exchange:
 	case nir_intrinsic_ssbo_atomic_comp_swap:
-		if (nir->info.stage == MESA_SHADER_FRAGMENT)
-			info->ps.writes_memory = true;
-		else if (nir->info.stage == MESA_SHADER_GEOMETRY)
-			info->gs.writes_memory = true;
+		set_writes_memory(nir, info);
 		break;
 	case nir_intrinsic_load_deref:
 		gather_intrinsic_load_deref_info(nir, instr, info);
 		break;
 	case nir_intrinsic_store_deref:
 		gather_intrinsic_store_deref_info(nir, instr, info);
+		/* fallthrough */
+	case nir_intrinsic_deref_atomic_add:
+	case nir_intrinsic_deref_atomic_imin:
+	case nir_intrinsic_deref_atomic_umin:
+	case nir_intrinsic_deref_atomic_imax:
+	case nir_intrinsic_deref_atomic_umax:
+	case nir_intrinsic_deref_atomic_and:
+	case nir_intrinsic_deref_atomic_or:
+	case nir_intrinsic_deref_atomic_xor:
+	case nir_intrinsic_deref_atomic_exchange:
+	case nir_intrinsic_deref_atomic_comp_swap: {
+		if (nir_src_as_deref(instr->src[0])->mode & (nir_var_mem_global | nir_var_mem_ssbo))
+			set_writes_memory(nir, info);
 		break;
+	}
 	default:
 		break;
 	}
@@ -386,7 +403,7 @@ gather_info_input_decl_vs(const nir_shader *nir, const nir_variable *var,
 	unsigned attrib_count = glsl_count_attribute_slots(var->type, true);
 	int idx = var->data.location;
 
-	if (idx >= VERT_ATTRIB_GENERIC0 && idx <= VERT_ATTRIB_GENERIC15)
+	if (idx >= VERT_ATTRIB_GENERIC0 && idx < VERT_ATTRIB_GENERIC0 + MAX_VERTEX_ATTRIBS)
 		info->vs.has_vertex_buffers = true;
 
 	for (unsigned i = 0; i < attrib_count; ++i) {
@@ -441,6 +458,9 @@ gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
 	case VARYING_SLOT_CLIP_DIST1:
 		info->ps.num_input_clips_culls += attrib_count;
 		break;
+	case VARYING_SLOT_VIEWPORT:
+		info->ps.viewport_index_input = true;
+		break;
 	default:
 		break;
 	}
@@ -462,6 +482,8 @@ gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
 
 	if (var->data.interpolation == INTERP_MODE_FLAT)
 		info->ps.flat_shaded_mask |= mask << var->data.driver_location;
+	if (var->data.interpolation == INTERP_MODE_EXPLICIT)
+		info->ps.explicit_shaded_mask |= mask << var->data.driver_location;
 
 	if (var->data.location >= VARYING_SLOT_VAR0)
 		info->ps.input_mask |= mask << (var->data.location - VARYING_SLOT_VAR0);
@@ -515,6 +537,17 @@ gather_info_output_decl_ps(const nir_shader *nir, const nir_variable *var,
 	default:
 		break;
 	}
+
+	if (idx >= FRAG_RESULT_DATA0 && idx <= FRAG_RESULT_DATA7) {
+		unsigned num_components = glsl_get_component_slots(glsl_without_array(var->type));
+		unsigned num_slots = glsl_count_attribute_slots(var->type, false);
+		unsigned write_mask = (1 << num_components) - 1;
+		unsigned slot = idx - FRAG_RESULT_DATA0;
+
+		for (unsigned i = 0; i < num_slots; i++) {
+			info->ps.cb_shader_mask |= write_mask << ((slot + i) * 4);
+		}
+	}
 }
 
 static void
@@ -634,7 +667,8 @@ void
 radv_nir_shader_info_pass(const struct nir_shader *nir,
 			  const struct radv_pipeline_layout *layout,
 			  const struct radv_shader_variant_key *key,
-			  struct radv_shader_info *info)
+			  struct radv_shader_info *info,
+			  bool use_llvm)
 {
 	struct nir_function *func =
 		(struct nir_function *)exec_list_get_head_const(&nir->functions);
@@ -645,14 +679,14 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 		info->loads_dynamic_offsets = true;
 	}
 
-	nir_foreach_variable(variable, &nir->inputs)
+	nir_foreach_shader_in_variable(variable, nir)
 		gather_info_input_decl(nir, variable, info, key);
 
 	nir_foreach_block(block, func->impl) {
 		gather_info_block(nir, block, info);
 	}
 
-	nir_foreach_variable(variable, &nir->outputs)
+	nir_foreach_shader_out_variable(variable, nir)
 		gather_info_output_decl(nir, variable, info, key);
 
 	if (nir->info.stage == MESA_SHADER_VERTEX ||
@@ -711,6 +745,23 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 		}
 	}
 
+	/* Make sure to export the ViewportIndex if the fragment shader needs it. */
+	if (key->vs_common_out.export_viewport_index) {
+		switch (nir->info.stage) {
+		case MESA_SHADER_VERTEX:
+			info->vs.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
+			break;
+		case MESA_SHADER_TESS_EVAL:
+			info->tes.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
+			break;
+		case MESA_SHADER_GEOMETRY:
+			info->gs.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
+			break;
+		default:
+			break;
+		}
+	}
+
 	if (nir->info.stage == MESA_SHADER_FRAGMENT)
 		info->ps.num_interp = nir->num_inputs;
 
@@ -723,6 +774,7 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 		info->ps.can_discard = nir->info.fs.uses_discard;
 		info->ps.early_fragment_test = nir->info.fs.early_fragment_tests;
 		info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
+		info->ps.depth_layout = nir->info.fs.depth_layout;
 		break;
 	case MESA_SHADER_GEOMETRY:
 		info->gs.vertices_in = nir->info.gs.vertices_in;
@@ -738,6 +790,7 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 		info->tes.as_es = key->vs_common_out.as_es;
 		info->tes.export_prim_id = key->vs_common_out.export_prim_id;
 		info->is_ngg = key->vs_common_out.as_ngg;
+		info->is_ngg_passthrough = key->vs_common_out.as_ngg_passthrough;
 		break;
 	case MESA_SHADER_TESS_CTRL:
 		info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
@@ -747,6 +800,7 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 		info->vs.as_ls = key->vs_common_out.as_ls;
 		info->vs.export_prim_id = key->vs_common_out.export_prim_id;
 		info->is_ngg = key->vs_common_out.as_ngg;
+		info->is_ngg_passthrough = key->vs_common_out.as_ngg_passthrough;
 		break;
 	default:
 		break;
@@ -767,18 +821,45 @@ radv_nir_shader_info_pass(const struct nir_shader *nir,
 	    key->vs_common_out.as_es) {
 		struct radv_es_output_info *es_info =
 			nir->info.stage == MESA_SHADER_VERTEX ? &info->vs.es_info : &info->tes.es_info;
-		uint32_t max_output_written = 0;
-		uint64_t output_mask = nir->info.outputs_written;
+		if (use_llvm) {
+			/* The outputs may contain gaps, use the highest output index + 1 */
+			uint32_t max_output_written = 0;
+			uint64_t output_mask = nir->info.outputs_written;
 
-		while (output_mask) {
-			const int i = u_bit_scan64(&output_mask);
-			unsigned param_index = shader_io_get_unique_index(i);
+			while (output_mask) {
+				const int i = u_bit_scan64(&output_mask);
+				unsigned param_index = shader_io_get_unique_index(i);
 
-			max_output_written = MAX2(param_index, max_output_written);
-		}
+				max_output_written = MAX2(param_index, max_output_written);
+			}
 
-		es_info->esgs_itemsize = (max_output_written + 1) * 16;
+			es_info->esgs_itemsize = (max_output_written + 1) * 16;
+		} else {
+			/* The outputs don't contain gaps, so we can use the number of outputs */
+			uint32_t num_outputs_written = nir->info.stage == MESA_SHADER_VERTEX
+						       ? info->vs.num_linked_outputs
+						       : info->tes.num_linked_outputs;
+			es_info->esgs_itemsize = num_outputs_written * 16;
+		}
 	}
 
 	info->float_controls_mode = nir->info.float_controls_execution_mode;
+
+	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
+		/* If the i-th output is used, all previous outputs must be
+		 * non-zero to match the target format.
+		 * TODO: compact MRT to avoid holes and to remove this
+		 * workaround.
+		 */
+		unsigned num_targets = (util_last_bit(info->ps.cb_shader_mask) + 3) / 4;
+		for (unsigned i = 0; i < num_targets; i++) {
+			if (!(info->ps.cb_shader_mask & (0xf << (i * 4)))) {
+				info->ps.cb_shader_mask |= 0xf << (i * 4);
+			}
+		}
+
+		if (key->fs.is_dual_src) {
+			info->ps.cb_shader_mask |= (info->ps.cb_shader_mask & 0xf) << 4;
+		}
+	}
 }
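
For reference: the hunk at @@ -634,7 +667,8 @@ gives radv_nir_shader_info_pass() a trailing "bool use_llvm" argument, which the hunk at @@ -767,18 +821,45 @@ then branches on to pick the esgs_itemsize calculation. A minimal sketch of what an updated call site would look like; only the function name and parameter order come from this diff, while the local names (pipeline, key, infos, i) and the way the backend flag is obtained are assumed placeholders, not part of the change:

	/* Hypothetical call site (e.g. in the pipeline compile path); everything
	 * except radv_nir_shader_info_pass() and its parameter order is assumed. */
	bool use_llvm = device_uses_llvm_backend;	/* however the driver tracks its backend */
	radv_nir_shader_info_pass(nir, pipeline->layout, &key,
				  &infos[i], use_llvm);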