X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fglsl%2Flink_uniforms.cpp;h=1c97e19578191f915d6186e293b55941dc5ccbd0;hb=3dc932d45048843d83b0f0207043d713f7bceb3c;hp=1c975863c0df39d04478190647644b8d92011f4c;hpb=71ba6de342b88dcf8ed3aa347da157b7724230e7;p=mesa.git diff --git a/src/glsl/link_uniforms.cpp b/src/glsl/link_uniforms.cpp index 1c975863c0d..1c97e195781 100644 --- a/src/glsl/link_uniforms.cpp +++ b/src/glsl/link_uniforms.cpp @@ -29,12 +29,6 @@ #include "program/hash_table.h" #include "program.h" -static inline unsigned int -align(unsigned int a, unsigned int align) -{ - return (a + align - 1) / align * align; -} - /** * \file link_uniforms.cpp * Assign locations for GLSL uniforms. @@ -58,23 +52,106 @@ values_for_type(const glsl_type *type) } void -uniform_field_visitor::process(ir_variable *var) +program_resource_visitor::process(const glsl_type *type, const char *name) +{ + assert(type->is_record() + || (type->is_array() && type->fields.array->is_record()) + || type->is_interface() + || (type->is_array() && type->fields.array->is_interface())); + + char *name_copy = ralloc_strdup(NULL, name); + recursion(type, &name_copy, strlen(name), false, NULL); + ralloc_free(name_copy); +} + +void +program_resource_visitor::process(ir_variable *var) { const glsl_type *t = var->type; + /* false is always passed for the row_major parameter to the other + * processing functions because no information is available to do + * otherwise. See the warning in linker.h. + */ + /* Only strdup the name if we actually will need to modify it. */ - if (t->is_record() || (t->is_array() && t->fields.array->is_record())) { + if (var->data.from_named_ifc_block_array) { + /* lower_named_interface_blocks created this variable by lowering an + * interface block array to an array variable. For example if the + * original source code was: + * + * out Blk { vec4 bar } foo[3]; + * + * Then the variable is now: + * + * out vec4 bar[3]; + * + * We need to visit each array element using the names constructed like + * so: + * + * Blk[0].bar + * Blk[1].bar + * Blk[2].bar + */ + assert(t->is_array()); + const glsl_type *ifc_type = var->get_interface_type(); + char *name = ralloc_strdup(NULL, ifc_type->name); + size_t name_length = strlen(name); + for (unsigned i = 0; i < t->length; i++) { + size_t new_length = name_length; + ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i, + var->name); + /* Note: row_major is only meaningful for uniform blocks, and + * lowering is only applied to non-uniform interface blocks, so we + * can safely pass false for row_major. + */ + recursion(var->type, &name, new_length, false, NULL); + } + ralloc_free(name); + } else if (var->data.from_named_ifc_block_nonarray) { + /* lower_named_interface_blocks created this variable by lowering a + * named interface block (non-array) to an ordinary variable. For + * example if the original source code was: + * + * out Blk { vec4 bar } foo; + * + * Then the variable is now: + * + * out vec4 bar; + * + * We need to visit this variable using the name: + * + * Blk.bar + */ + const glsl_type *ifc_type = var->get_interface_type(); + char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name); + /* Note: row_major is only meaningful for uniform blocks, and lowering + * is only applied to non-uniform interface blocks, so we can safely + * pass false for row_major. 
+ */ + recursion(var->type, &name, strlen(name), false, NULL); + ralloc_free(name); + } else if (t->is_record() || (t->is_array() && t->fields.array->is_record())) { char *name = ralloc_strdup(NULL, var->name); - recursion(var->type, &name, strlen(name)); + recursion(var->type, &name, strlen(name), false, NULL); + ralloc_free(name); + } else if (t->is_interface()) { + char *name = ralloc_strdup(NULL, var->type->name); + recursion(var->type, &name, strlen(name), false, NULL); + ralloc_free(name); + } else if (t->is_array() && t->fields.array->is_interface()) { + char *name = ralloc_strdup(NULL, var->type->fields.array->name); + recursion(var->type, &name, strlen(name), false, NULL); ralloc_free(name); } else { - this->visit_field(t, var->name); + this->visit_field(t, var->name, false, NULL); } } void -uniform_field_visitor::recursion(const glsl_type *t, char **name, - size_t name_length) +program_resource_visitor::recursion(const glsl_type *t, char **name, + size_t name_length, bool row_major, + const glsl_type *record_type) { /* Records need to have each field processed individually. * @@ -82,30 +159,73 @@ uniform_field_visitor::recursion(const glsl_type *t, char **name, * individually, then each field of the resulting array elements processed * individually. */ - if (t->is_record()) { + if (t->is_record() || t->is_interface()) { + if (record_type == NULL && t->is_record()) + record_type = t; + for (unsigned i = 0; i < t->length; i++) { const char *field = t->fields.structure[i].name; size_t new_length = name_length; - /* Append '.field' to the current uniform name. */ - ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field); + if (t->fields.structure[i].type->is_record()) + this->visit_field(&t->fields.structure[i]); + + /* Append '.field' to the current variable name. */ + if (name_length == 0) { + ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field); + } else { + ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field); + } - recursion(t->fields.structure[i].type, name, new_length); + recursion(t->fields.structure[i].type, name, new_length, + t->fields.structure[i].row_major, record_type); + + /* Only the first leaf-field of the record gets called with the + * record type pointer. + */ + record_type = NULL; } - } else if (t->is_array() && t->fields.array->is_record()) { + } else if (t->is_array() && (t->fields.array->is_record() + || t->fields.array->is_interface())) { + if (record_type == NULL && t->fields.array->is_record()) + record_type = t->fields.array; + for (unsigned i = 0; i < t->length; i++) { size_t new_length = name_length; - /* Append the subscript to the current uniform name */ + /* Append the subscript to the current variable name */ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i); - recursion(t->fields.array, name, new_length); + recursion(t->fields.array, name, new_length, row_major, + record_type); + + /* Only the first leaf-field of the record gets called with the + * record type pointer. 
+ */ + record_type = NULL; } } else { - this->visit_field(t, *name); + this->visit_field(t, *name, row_major, record_type); } } +void +program_resource_visitor::visit_field(const glsl_type *type, const char *name, + bool row_major, + const glsl_type *record_type) +{ + visit_field(type, name, row_major); +} + +void +program_resource_visitor::visit_field(const glsl_struct_field *field) +{ + (void) field; + /* empty */ +} + +namespace { + /** * Class to help calculate the storage requirements for a set of uniforms * @@ -116,11 +236,11 @@ uniform_field_visitor::recursion(const glsl_type *t, char **name, * If the same uniform is added multiple times (i.e., once for each shader * target), it will only be accounted once. */ -class count_uniform_size : public uniform_field_visitor { +class count_uniform_size : public program_resource_visitor { public: count_uniform_size(struct string_to_uint_map *map) : num_active_uniforms(0), num_values(0), num_shader_samplers(0), - num_shader_uniform_components(0), map(map) + num_shader_uniform_components(0), is_ubo_var(false), map(map) { /* empty */ } @@ -131,6 +251,16 @@ public: this->num_shader_uniform_components = 0; } + void process(ir_variable *var) + { + this->is_ubo_var = var->is_in_uniform_block(); + if (var->is_interface_instance()) + program_resource_visitor::process(var->get_interface_type(), + var->get_interface_type()->name); + else + program_resource_visitor::process(var); + } + /** * Total number of active uniforms counted */ @@ -151,11 +281,18 @@ public: */ unsigned num_shader_uniform_components; + bool is_ubo_var; + private: - virtual void visit_field(const glsl_type *type, const char *name) + virtual void visit_field(const glsl_type *type, const char *name, + bool row_major) { assert(!type->is_record()); assert(!(type->is_array() && type->fields.array->is_record())); + assert(!type->is_interface()); + assert(!(type->is_array() && type->fields.array->is_interface())); + + (void) row_major; /* Count the number of samplers regardless of whether the uniform is * already in the hash table. The hash table prevents adding the same @@ -171,7 +308,8 @@ private: * Note that samplers do not count against this limit because they * don't use any storage on current hardware. */ - this->num_shader_uniform_components += values; + if (!is_ubo_var) + this->num_shader_uniform_components += values; } /* If the uniform is already in the map, there's nothing more to do. @@ -192,6 +330,8 @@ private: struct string_to_uint_map *map; }; +} /* anonymous namespace */ + /** * Class to help parcel out pieces of backing storage to uniforms * @@ -207,49 +347,137 @@ private: * the \c gl_uniform_storage and \c gl_constant_value arrays are "big * enough." 
*/ -class parcel_out_uniform_storage : public uniform_field_visitor { +class parcel_out_uniform_storage : public program_resource_visitor { public: parcel_out_uniform_storage(struct string_to_uint_map *map, struct gl_uniform_storage *uniforms, union gl_constant_value *values) - : map(map), uniforms(uniforms), next_sampler(0), values(values) + : map(map), uniforms(uniforms), values(values) { - memset(this->targets, 0, sizeof(this->targets)); } - void start_shader() + void start_shader(gl_shader_stage shader_type) { + assert(shader_type < MESA_SHADER_STAGES); + this->shader_type = shader_type; + this->shader_samplers_used = 0; this->shader_shadow_samplers = 0; + this->next_sampler = 0; + memset(this->targets, 0, sizeof(this->targets)); } - void set_and_process(struct gl_shader *shader, + void set_and_process(struct gl_shader_program *prog, ir_variable *var) { - ubo_var = NULL; - if (var->uniform_block != -1) { - struct gl_uniform_block *block = - &shader->UniformBlocks[var->uniform_block]; - - ubo_block_index = var->uniform_block; - ubo_var_index = var->location; - ubo_var = &block->Uniforms[var->location]; - ubo_byte_offset = ubo_var->Offset; - } - - process(var); + ubo_block_index = -1; + if (var->is_in_uniform_block()) { + if (var->is_interface_instance() && var->type->is_array()) { + unsigned l = strlen(var->get_interface_type()->name); + + for (unsigned i = 0; i < prog->NumUniformBlocks; i++) { + if (strncmp(var->get_interface_type()->name, + prog->UniformBlocks[i].Name, + l) == 0 + && prog->UniformBlocks[i].Name[l] == '[') { + ubo_block_index = i; + break; + } + } + } else { + for (unsigned i = 0; i < prog->NumUniformBlocks; i++) { + if (strcmp(var->get_interface_type()->name, + prog->UniformBlocks[i].Name) == 0) { + ubo_block_index = i; + break; + } + } + } + assert(ubo_block_index != -1); + + /* Uniform blocks that were specified with an instance name must be + * handled a little bit differently. The name of the variable is the + * name used to reference the uniform block instead of being the name + * of a variable within the block. Therefore, searching for the name + * within the block will fail. + */ + if (var->is_interface_instance()) { + ubo_byte_offset = 0; + ubo_row_major = false; + } else { + const struct gl_uniform_block *const block = + &prog->UniformBlocks[ubo_block_index]; + + assert(var->data.location != -1); + + const struct gl_uniform_buffer_variable *const ubo_var = + &block->Uniforms[var->data.location]; + + ubo_row_major = ubo_var->RowMajor; + ubo_byte_offset = ubo_var->Offset; + } + + if (var->is_interface_instance()) + process(var->get_interface_type(), + var->get_interface_type()->name); + else + process(var); + } else + process(var); } - struct gl_uniform_buffer_variable *ubo_var; int ubo_block_index; - int ubo_var_index; int ubo_byte_offset; + bool ubo_row_major; + gl_shader_stage shader_type; private: - virtual void visit_field(const glsl_type *type, const char *name) + void handle_samplers(const glsl_type *base_type, + struct gl_uniform_storage *uniform) + { + if (base_type->is_sampler()) { + uniform->sampler[shader_type].index = this->next_sampler; + uniform->sampler[shader_type].active = true; + + /* Increment the sampler by 1 for non-arrays and by the number of + * array elements for arrays. 
+ */ + this->next_sampler += + MAX2(1, uniform->array_elements); + + const gl_texture_index target = base_type->sampler_index(); + const unsigned shadow = base_type->sampler_shadow; + for (unsigned i = uniform->sampler[shader_type].index; + i < MIN2(this->next_sampler, MAX_SAMPLERS); + i++) { + this->targets[i] = target; + this->shader_samplers_used |= 1U << i; + this->shader_shadow_samplers |= shadow << i; + } + } else { + uniform->sampler[shader_type].index = ~0; + uniform->sampler[shader_type].active = false; + } + } + + virtual void visit_field(const glsl_type *type, const char *name, + bool row_major) + { + (void) type; + (void) name; + (void) row_major; + assert(!"Should not get here."); + } + + virtual void visit_field(const glsl_type *type, const char *name, + bool row_major, const glsl_type *record_type) { assert(!type->is_record()); assert(!(type->is_array() && type->fields.array->is_record())); + assert(!type->is_interface()); + assert(!(type->is_array() && type->fields.array->is_interface())); + + (void) row_major; unsigned id; bool found = this->map->get(id, name); @@ -258,31 +486,6 @@ private: if (!found) return; - /* If there is already storage associated with this uniform, it means - * that it was set while processing an earlier shader stage. For - * example, we may be processing the uniform in the fragment shader, but - * the uniform was already processed in the vertex shader. - */ - if (this->uniforms[id].storage != NULL) { - /* If the uniform already has storage set from another shader stage, - * mark the samplers used for this shader stage. - */ - if (type->contains_sampler()) { - const unsigned count = MAX2(1, this->uniforms[id].array_elements); - const unsigned shadow = (type->is_array()) - ? type->fields.array->sampler_shadow : type->sampler_shadow; - - for (unsigned i = 0; i < count; i++) { - const unsigned s = this->uniforms[id].sampler + i; - - this->shader_samplers_used |= 1U << s; - this->shader_shadow_samplers |= shadow << s; - } - } - - return; - } - const glsl_type *base_type; if (type->is_array()) { this->uniforms[id].array_elements = type->length; @@ -292,26 +495,16 @@ private: base_type = type; } - if (base_type->is_sampler()) { - this->uniforms[id].sampler = this->next_sampler; - - /* Increment the sampler by 1 for non-arrays and by the number of - * array elements for arrays. - */ - this->next_sampler += MAX2(1, this->uniforms[id].array_elements); - - const gl_texture_index target = base_type->sampler_index(); - const unsigned shadow = base_type->sampler_shadow; - for (unsigned i = this->uniforms[id].sampler - ; i < this->next_sampler - ; i++) { - this->targets[i] = target; - this->shader_samplers_used |= 1U << i; - this->shader_shadow_samplers |= shadow << i; - } + /* This assigns sampler uniforms to sampler units. */ + handle_samplers(base_type, &this->uniforms[id]); - } else { - this->uniforms[id].sampler = ~0; + /* If there is already storage associated with this uniform, it means + * that it was set while processing an earlier shader stage. For + * example, we may be processing the uniform in the fragment shader, but + * the uniform was already processed in the vertex shader. 
+ */ + if (this->uniforms[id].storage != NULL) { + return; } this->uniforms[id].name = ralloc_strdup(this->uniforms, name); @@ -320,17 +513,20 @@ private: this->uniforms[id].num_driver_storage = 0; this->uniforms[id].driver_storage = NULL; this->uniforms[id].storage = this->values; - if (this->ubo_var) { + this->uniforms[id].atomic_buffer_index = -1; + if (this->ubo_block_index != -1) { this->uniforms[id].block_index = this->ubo_block_index; - unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor); - this->ubo_byte_offset = align(this->ubo_byte_offset, alignment); + const unsigned alignment = record_type + ? record_type->std140_base_alignment(ubo_row_major) + : type->std140_base_alignment(ubo_row_major); + this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment); this->uniforms[id].offset = this->ubo_byte_offset; - this->ubo_byte_offset += type->std140_size(ubo_var->RowMajor); + this->ubo_byte_offset += type->std140_size(ubo_row_major); if (type->is_array()) { this->uniforms[id].array_stride = - align(type->fields.array->std140_size(ubo_var->RowMajor), 16); + glsl_align(type->fields.array->std140_size(ubo_row_major), 16); } else { this->uniforms[id].array_stride = 0; } @@ -338,7 +534,7 @@ private: if (type->is_matrix() || (type->is_array() && type->fields.array->is_matrix())) { this->uniforms[id].matrix_stride = 16; - this->uniforms[id].row_major = ubo_var->RowMajor; + this->uniforms[id].row_major = ubo_row_major; } else { this->uniforms[id].matrix_stride = 0; this->uniforms[id].row_major = false; @@ -389,26 +585,10 @@ link_cross_validate_uniform_block(void *mem_ctx, { for (unsigned int i = 0; i < *num_linked_blocks; i++) { struct gl_uniform_block *old_block = &(*linked_blocks)[i]; - if (strcmp(old_block->Name, new_block->Name) == 0) { - if (old_block->NumUniforms != new_block->NumUniforms) { - return -1; - } - - for (unsigned j = 0; j < old_block->NumUniforms; j++) { - if (strcmp(old_block->Uniforms[j].Name, - new_block->Uniforms[j].Name) != 0) - return -1; - if (old_block->Uniforms[j].Offset != - new_block->Uniforms[j].Offset) - return -1; - - if (old_block->Uniforms[j].RowMajor != - new_block->Uniforms[j].RowMajor) - return -1; - } - return i; - } + if (strcmp(old_block->Name, new_block->Name) == 0) + return link_uniform_blocks_are_compatible(old_block, new_block) + ? i : -1; } *linked_blocks = reralloc(mem_ctx, *linked_blocks, @@ -430,7 +610,13 @@ link_cross_validate_uniform_block(void *mem_ctx, struct gl_uniform_buffer_variable *ubo_var = &linked_block->Uniforms[i]; - ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name); + if (ubo_var->Name == ubo_var->IndexName) { + ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name); + ubo_var->IndexName = ubo_var->Name; + } else { + ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name); + ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName); + } } return linked_block_index; @@ -442,24 +628,54 @@ link_cross_validate_uniform_block(void *mem_ctx, * would point at the uniform block list in one of the pre-linked * shaders). 
*/ -static bool +static void link_update_uniform_buffer_variables(struct gl_shader *shader) { foreach_list(node, shader->ir) { ir_variable *const var = ((ir_instruction *) node)->as_variable(); - if ((var == NULL) || (var->uniform_block == -1)) + if ((var == NULL) || !var->is_in_uniform_block()) continue; - assert(var->mode == ir_var_uniform); + assert(var->data.mode == ir_var_uniform); + + if (var->is_interface_instance()) { + var->data.location = 0; + continue; + } bool found = false; + char sentinel = '\0'; + + if (var->type->is_record()) { + sentinel = '.'; + } else if (var->type->is_array() + && var->type->fields.array->is_record()) { + sentinel = '['; + } + + const unsigned l = strlen(var->name); for (unsigned i = 0; i < shader->NumUniformBlocks; i++) { for (unsigned j = 0; j < shader->UniformBlocks[i].NumUniforms; j++) { - if (!strcmp(var->name, shader->UniformBlocks[i].Uniforms[j].Name)) { + if (sentinel) { + const char *begin = shader->UniformBlocks[i].Uniforms[j].Name; + const char *end = strchr(begin, sentinel); + + if (end == NULL) + continue; + + if (l != (end - begin)) + continue; + + if (strncmp(var->name, begin, l) == 0) { + found = true; + var->data.location = j; + break; + } + } else if (!strcmp(var->name, + shader->UniformBlocks[i].Uniforms[j].Name)) { found = true; - var->uniform_block = i; - var->location = j; + var->data.location = j; break; } } @@ -468,8 +684,6 @@ link_update_uniform_buffer_variables(struct gl_shader *shader) } assert(found); } - - return true; } void @@ -486,11 +700,23 @@ link_assign_uniform_block_offsets(struct gl_shader *shader) unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor); unsigned size = type->std140_size(ubo_var->RowMajor); - offset = align(offset, alignment); + offset = glsl_align(offset, alignment); ubo_var->Offset = offset; offset += size; } - block->UniformBufferSize = offset; + + /* From the GL_ARB_uniform_buffer_object spec: + * + * "For uniform blocks laid out according to [std140] rules, + * the minimum buffer object size returned by the + * UNIFORM_BLOCK_DATA_SIZE query is derived by taking the + * offset of the last basic machine unit consumed by the + * last uniform of the uniform block (including any + * end-of-array or end-of-structure padding), adding one, + * and rounding up to the next multiple of the base + * alignment required for a vec4." + */ + block->UniformBufferSize = glsl_align(offset, 16); } } @@ -507,25 +733,6 @@ link_assign_uniform_locations(struct gl_shader_program *prog) prog->UniformHash = new string_to_uint_map; } - /* Uniforms that lack an initializer in the shader code have an initial - * value of zero. This includes sampler uniforms. - * - * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says: - * - * "The link time initial value is either the value of the variable's - * initializer, if present, or 0 if no initializer is present. Sampler - * types cannot have initializers." - */ - memset(prog->SamplerUnits, 0, sizeof(prog->SamplerUnits)); - - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) - continue; - - if (!link_update_uniform_buffer_variables(prog->_LinkedShaders[i])) - return; - } - /* First pass: Count the uniform resources used by the user-defined * uniforms. While this happens, each active uniform will have an index * assigned to it. @@ -534,31 +741,54 @@ link_assign_uniform_locations(struct gl_shader_program *prog) * glGetUniformLocation. 
*/ count_uniform_size uniform_size(prog->UniformHash); - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { + struct gl_shader *sh = prog->_LinkedShaders[i]; + + if (sh == NULL) continue; + /* Uniforms that lack an initializer in the shader code have an initial + * value of zero. This includes sampler uniforms. + * + * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says: + * + * "The link time initial value is either the value of the variable's + * initializer, if present, or 0 if no initializer is present. Sampler + * types cannot have initializers." + */ + memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits)); + + link_update_uniform_buffer_variables(sh); + /* Reset various per-shader target counts. */ uniform_size.start_shader(); - foreach_list(node, prog->_LinkedShaders[i]->ir) { + foreach_list(node, sh->ir) { ir_variable *const var = ((ir_instruction *) node)->as_variable(); - if ((var == NULL) || (var->mode != ir_var_uniform)) + if ((var == NULL) || (var->data.mode != ir_var_uniform)) continue; /* FINISHME: Update code to process built-in uniforms! */ - if (strncmp("gl_", var->name, 3) == 0) + if (strncmp("gl_", var->name, 3) == 0) { + uniform_size.num_shader_uniform_components += + var->type->component_slots(); continue; + } uniform_size.process(var); } - prog->_LinkedShaders[i]->num_samplers = uniform_size.num_shader_samplers; - prog->_LinkedShaders[i]->num_uniform_components = - uniform_size.num_shader_uniform_components; + sh->num_samplers = uniform_size.num_shader_samplers; + sh->num_uniform_components = uniform_size.num_shader_uniform_components; + + sh->num_combined_uniform_components = sh->num_uniform_components; + for (unsigned i = 0; i < sh->NumUniformBlocks; i++) { + sh->num_combined_uniform_components += + sh->UniformBlocks[i].UniformBufferSize / 4; + } } const unsigned num_user_uniforms = uniform_size.num_active_uniforms; @@ -579,18 +809,16 @@ link_assign_uniform_locations(struct gl_shader_program *prog) parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data); - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { if (prog->_LinkedShaders[i] == NULL) continue; - /* Reset various per-shader target counts. - */ - parcel.start_shader(); + parcel.start_shader((gl_shader_stage)i); foreach_list(node, prog->_LinkedShaders[i]->ir) { ir_variable *const var = ((ir_instruction *) node)->as_variable(); - if ((var == NULL) || (var->mode != ir_var_uniform)) + if ((var == NULL) || (var->data.mode != ir_var_uniform)) continue; /* FINISHME: Update code to process built-in uniforms! @@ -598,15 +826,30 @@ link_assign_uniform_locations(struct gl_shader_program *prog) if (strncmp("gl_", var->name, 3) == 0) continue; - parcel.set_and_process(prog->_LinkedShaders[i], var); + parcel.set_and_process(prog, var); } prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used; prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers; + + STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) == sizeof(parcel.targets)); + memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets, + sizeof(prog->_LinkedShaders[i]->SamplerTargets)); + } + + /* Determine the size of the largest uniform array queryable via + * glGetUniformLocation. Using this as the location scale guarantees that + * there is enough "room" for the array index to be stored in the low order + * part of the uniform location. 
It also makes the locations be more + * tightly packed. + */ + unsigned max_array_size = 1; + for (unsigned i = 0; i < num_user_uniforms; i++) { + if (uniforms[i].array_elements > max_array_size) + max_array_size = uniforms[i].array_elements; } - assert(sizeof(prog->SamplerTargets) == sizeof(parcel.targets)); - memcpy(prog->SamplerTargets, parcel.targets, sizeof(prog->SamplerTargets)); + prog->UniformLocationBaseScale = max_array_size; #ifndef NDEBUG for (unsigned i = 0; i < num_user_uniforms; i++) {
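
Every glsl_align() call introduced by this patch performs the same rounding as the file-local align() helper the patch removes at the top, namely (a + align - 1) / align * align. Below is a minimal standalone sketch of that arithmetic and of the std140 size rounding it is used for, assuming glsl_align() matches the removed helper (the function and file names in the sketch are illustrative, not Mesa's):

#include <assert.h>
#include <stdio.h>

/* Same computation as the align() helper removed at the top of this patch;
 * assumed to be what glsl_align() provides elsewhere in the tree. */
static unsigned
align_up(unsigned a, unsigned alignment)
{
   return (a + alignment - 1) / alignment * alignment;
}

int
main(void)
{
   /* std140 example: if the last uniform of a block is a float placed at
    * byte offset 16, the running offset after it is 20.  The patch then
    * rounds the block size up to the next vec4 boundary, per the
    * GL_ARB_uniform_buffer_object quote in
    * link_assign_uniform_block_offsets(). */
   unsigned offset = 20;
   printf("UniformBufferSize = %u\n", align_up(offset, 16)); /* prints 32 */

   /* Array strides inside std140 blocks are rounded the same way, matching
    * the glsl_align(type->fields.array->std140_size(...), 16) call in
    * parcel_out_uniform_storage::visit_field(). */
   assert(align_up(4, 16) == 16);
   assert(align_up(20, 16) == 32);
   assert(align_up(32, 16) == 32);
   return 0;
}

The scale stored in prog->UniformLocationBaseScale just above is a separate choice: it is simply the largest active array size, so, as the comment in the patch explains, a queried uniform location has room for an array index in its low-order part while keeping locations tightly packed.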