+static void
+add_parameter(struct gl_uniform_storage *uniform,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ const struct glsl_type *type,
+ struct nir_link_uniforms_state *state)
+{
+ if (!state->params || uniform->is_shader_storage ||
+ (glsl_contains_opaque(type) && !state->current_var->data.bindless))
+ return;
+
+ unsigned num_params = glsl_get_aoa_size(type);
+ num_params = MAX2(num_params, 1);
+ num_params *= glsl_get_matrix_columns(glsl_without_array(type));
+
+ bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
+ if (is_dual_slot)
+ num_params *= 2;
+
+ struct gl_program_parameter_list *params = state->params;
+ int base_index = params->NumParameters;
+ _mesa_reserve_parameter_storage(params, num_params);
+
+ if (ctx->Const.PackedDriverUniformStorage) {
+ for (unsigned i = 0; i < num_params; i++) {
+ unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
+ unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
+ if (is_dual_slot) {
+ if (i & 0x1)
+ comps -= 4;
+ else
+ comps = 4;
+ }
+
+ _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, comps,
+ glsl_get_gl_type(type), NULL, NULL, false);
+ }
+ } else {
+ for (unsigned i = 0; i < num_params; i++) {
+ _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name, 4,
+ glsl_get_gl_type(type), NULL, NULL, true);
+ }
+ }
+
+ /* Each Parameter will hold the index to the backing uniform storage.
+ * This avoids relying on names to match parameters and uniform
+ * storages.
+ */
+ for (unsigned i = 0; i < num_params; i++) {
+ struct gl_program_parameter *param = ¶ms->Parameters[base_index + i];
+ param->UniformStorageIndex = uniform - prog->data->UniformStorage;
+ param->MainUniformStorageIndex = state->current_var->data.location;
+ }
+}
+
+static unsigned
+get_next_index(struct nir_link_uniforms_state *state,
+ const struct gl_uniform_storage *uniform,
+ unsigned *next_index, bool *initialised)
+{
+ /* If we’ve already calculated an index for this member then we can just
+ * offset from there.
+ */
+ if (state->current_type->next_index == UINT_MAX) {
+ /* Otherwise we need to reserve enough indices for all of the arrays
+ * enclosing this member.
+ */
+
+ unsigned array_size = 1;
+
+ for (const struct type_tree_entry *p = state->current_type;
+ p;
+ p = p->parent) {
+ array_size *= p->array_size;
+ }
+
+ state->current_type->next_index = *next_index;
+ *next_index += array_size;
+ *initialised = true;
+ } else
+ *initialised = false;
+
+ unsigned index = state->current_type->next_index;
+
+ state->current_type->next_index += MAX2(1, uniform->array_elements);
+
+ return index;
+}
+
/* Update the uniforms info for the current shader stage.
 *
 * Assigns the per-stage opaque index for samplers, images and subroutines,
 * fills in the stage's gl_program bookkeeping (sampler targets/shadow bits,
 * image access qualifiers, bindless tables) and accumulates the per-stage
 * resource counters kept in @state.
 */
static void
update_uniforms_shader_info(struct gl_shader_program *prog,
                            struct nir_link_uniforms_state *state,
                            struct gl_uniform_storage *uniform,
                            const struct glsl_type *type,
                            unsigned stage)
{
   unsigned values = glsl_get_component_slots(type);
   const struct glsl_type *type_no_array = glsl_without_array(type);

   if (glsl_type_is_sampler(type_no_array)) {
      bool init_idx;
      /* Bindless and bound samplers draw from separate index spaces. */
      unsigned *next_index = state->current_var->data.bindless ?
         &state->next_bindless_sampler_index :
         &state->next_sampler_index;
      int sampler_index = get_next_index(state, uniform, next_index, &init_idx);
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      if (state->current_var->data.bindless) {
         /* Only grow the table on the first visit to this member
          * (init_idx); re-visits reuse the already-reserved slots.
          */
         if (init_idx) {
            /* rerzalloc grows the array and zeroes the newly added tail. */
            sh->Program->sh.BindlessSamplers =
               rerzalloc(sh->Program, sh->Program->sh.BindlessSamplers,
                         struct gl_bindless_sampler,
                         sh->Program->sh.NumBindlessSamplers,
                         state->next_bindless_sampler_index);

            /* Record the sampler target for each newly added slot. */
            for (unsigned j = sh->Program->sh.NumBindlessSamplers;
                 j < state->next_bindless_sampler_index; j++) {
               sh->Program->sh.BindlessSamplers[j].target =
                  glsl_get_sampler_target(type_no_array);
            }

            sh->Program->sh.NumBindlessSamplers =
               state->next_bindless_sampler_index;
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      } else {
         /* Samplers (bound or bindless) are counted as two components
          * as specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            /* Clamp to MAX_SAMPLERS; over-limit cases are reported
             * elsewhere — TODO confirm where the limit error is raised.
             */
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS); i++) {
               sh->Program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = sampler_index;
   } else if (glsl_type_is_image(type_no_array)) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      /* Set image access qualifiers: translate the NIR access flags into
       * the GL_READ_ONLY / GL_WRITE_ONLY / GL_READ_WRITE enums.
       */
      enum gl_access_qualifier image_access =
         state->current_var->data.access;
      const GLenum access =
         (image_access & ACCESS_NON_WRITEABLE) ?
         ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                 GL_READ_ONLY) :
         ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                 GL_READ_WRITE);

      int image_index;
      if (state->current_var->data.bindless) {
         image_index = state->next_bindless_image_index;
         state->next_bindless_image_index += MAX2(1, uniform->array_elements);

         /* Grow the bindless image table (zeroing the new tail) and set
          * the access qualifier on each newly added slot.
          */
         sh->Program->sh.BindlessImages =
            rerzalloc(sh->Program, sh->Program->sh.BindlessImages,
                      struct gl_bindless_image,
                      sh->Program->sh.NumBindlessImages,
                      state->next_bindless_image_index);

         for (unsigned j = sh->Program->sh.NumBindlessImages;
              j < state->next_bindless_image_index; j++) {
            sh->Program->sh.BindlessImages[j].access = access;
         }

         sh->Program->sh.NumBindlessImages = state->next_bindless_image_index;

      } else {
         image_index = state->next_image_index;
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS); i++) {
            sh->Program->sh.ImageAccess[i] = access;
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = image_index;

      if (!uniform->is_shader_storage)
         state->num_shader_uniform_components += values;
   } else {
      if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

         uniform->opaque[stage].index = state->next_subroutine;
         uniform->opaque[stage].active = true;

         sh->Program->sh.NumSubroutineUniforms++;

         /* Increment the subroutine index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         state->next_subroutine += MAX2(1, uniform->array_elements);
      }

      /* Plain (non-opaque) uniform: only counts against the default
       * uniform block budget when not inside a UBO/SSBO.
       */
      if (!state->var_is_in_block)
         state->num_shader_uniform_components += values;
   }
}
+
+static bool
+find_and_update_named_uniform_storage(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct nir_link_uniforms_state *state,
+ nir_variable *var, char **name,
+ size_t name_length,
+ const struct glsl_type *type,
+ unsigned stage, bool *first_element)
+{
+ /* gl_uniform_storage can cope with one level of array, so if the type is a
+ * composite type or an array where each element occupies more than one
+ * location than we need to recursively process it.
+ */
+ if (glsl_type_is_struct_or_ifc(type) ||
+ (glsl_type_is_array(type) &&
+ (glsl_type_is_array(glsl_get_array_element(type)) ||
+ glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
+
+ struct type_tree_entry *old_type = state->current_type;
+ state->current_type = old_type->children;
+
+ /* Shader storage block unsized arrays: add subscript [0] to variable
+ * names.
+ */
+ unsigned length = glsl_get_length(type);
+ if (glsl_type_is_unsized_array(type))
+ length = 1;
+
+ bool result = false;
+ for (unsigned i = 0; i < length; i++) {
+ const struct glsl_type *field_type;
+ size_t new_length = name_length;
+
+ if (glsl_type_is_struct_or_ifc(type)) {
+ field_type = glsl_get_struct_field(type, i);
+
+ /* Append '.field' to the current variable name. */
+ if (name) {
+ ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
+ glsl_get_struct_elem_name(type, i));
+ }
+ } else {
+ field_type = glsl_get_array_element(type);
+
+ /* Append the subscript to the current variable name */
+ if (name)
+ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
+ }
+
+ result = find_and_update_named_uniform_storage(ctx, prog, state,
+ var, name, new_length,
+ field_type, stage,
+ first_element);
+
+ if (glsl_type_is_struct_or_ifc(type))
+ state->current_type = state->current_type->next_sibling;
+
+ if (!result) {
+ state->current_type = old_type;
+ return false;
+ }
+ }
+
+ state->current_type = old_type;
+
+ return result;
+ } else {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(state->uniform_hash, *name);
+ if (entry) {
+ unsigned i = (unsigned) (intptr_t) entry->data;
+ struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
+
+ if (*first_element && !state->var_is_in_block) {
+ *first_element = false;
+ var->data.location = uniform - prog->data->UniformStorage;
+ }
+
+ update_uniforms_shader_info(prog, state, uniform, type, stage);
+
+ const struct glsl_type *type_no_array = glsl_without_array(type);
+ struct hash_entry *entry = prog->data->spirv ? NULL :
+ _mesa_hash_table_search(state->referenced_uniforms[stage],
+ state->current_var->name);
+ if (entry != NULL ||
+ glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
+ prog->data->spirv)
+ uniform->active_shader_mask |= 1 << stage;
+
+ if (!state->var_is_in_block)
+ add_parameter(uniform, ctx, prog, type, state);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
 * Finds, returns, and updates the stage info for any uniform in
 * UniformStorage defined by @var.  For GLSL this is done using the name;
 * for SPIR-V in general it is done using the explicit location, except:
 *
 *  * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *    locate them.  That means that more than one entry in the uniform
 *    storage can be found.  In that case all of them are updated, and the
 *    first entry is returned, in order to update the location of the nir
 *    variable.
 *
 *  * Special uniforms, like atomic counters: they lack an explicit
 *    location, so they are skipped.  They will be handled and assigned a
 *    location later.
 */
static bool
find_and_update_previous_uniform_storage(struct gl_context *ctx,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      /* GLSL path: match by qualified name.  Work on a ralloc'ed copy
       * because the recursive walk rewrites the tail of the name in place.
       */
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(ctx, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      bool result = false;
      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos. */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            /* Blocks have no explicit location, so match by binding; every
             * storage entry belonging to the bound block gets activated.
             */
            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
               result = true;
            }
         }
      }

      if (result)
         /* Point the variable at the first matching storage entry. */
         var->data.location = uniform - prog->data->UniformStorage;
      return result;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without
    * a location (ie: atomic counters) that would have an initial location
    * equal to -1.  We just return in that case.  Those uniforms will be
    * handled later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders with a lot
    * of uniforms.  Would it be better to use some type of hash?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         /* Rewrite the variable's location from the SPIR-V remap location
          * to the index of the backing storage entry.
          */
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, ctx, prog, var->type, state);
         return true;
      }
   }

   return false;
}
+