X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fglsl%2Flink_uniforms.cpp;h=a0cb618292517b4b63a0896f2c3e55f1c62afc6b;hb=c8bc8d723598ec87bbce9a2439075dfe1612a359;hp=f1284adb293c2213acc007965e58a2af1a0a3e5d;hpb=22233da1ee4b59663966169759960c00c033d0e9;p=mesa.git diff --git a/src/glsl/link_uniforms.cpp b/src/glsl/link_uniforms.cpp index f1284adb293..a0cb6182925 100644 --- a/src/glsl/link_uniforms.cpp +++ b/src/glsl/link_uniforms.cpp @@ -36,6 +36,11 @@ * \author Ian Romanick */ +/** + * Used by linker to indicate uniforms that have no location set. + */ +#define UNMAPPED_UNIFORM_LOC ~0u + /** * Count the backing storage requirements for a type */ @@ -52,22 +57,22 @@ values_for_type(const glsl_type *type) } void -uniform_field_visitor::process(const glsl_type *type, const char *name) +program_resource_visitor::process(const glsl_type *type, const char *name) { - assert(type->is_record() - || (type->is_array() && type->fields.array->is_record()) - || type->is_interface() - || (type->is_array() && type->fields.array->is_interface())); + assert(type->without_array()->is_record() + || type->without_array()->is_interface()); char *name_copy = ralloc_strdup(NULL, name); - recursion(type, &name_copy, strlen(name), false); + recursion(type, &name_copy, strlen(name), false, NULL, false); ralloc_free(name_copy); } void -uniform_field_visitor::process(ir_variable *var) +program_resource_visitor::process(ir_variable *var) { const glsl_type *t = var->type; + const bool row_major = + var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR; /* false is always passed for the row_major parameter to the other * processing functions because no information is available to do @@ -75,26 +80,84 @@ uniform_field_visitor::process(ir_variable *var) */ /* Only strdup the name if we actually will need to modify it. */ - if (t->is_record() || (t->is_array() && t->fields.array->is_record())) { + if (var->data.from_named_ifc_block_array) { + /* lower_named_interface_blocks created this variable by lowering an + * interface block array to an array variable. For example if the + * original source code was: + * + * out Blk { vec4 bar } foo[3]; + * + * Then the variable is now: + * + * out vec4 bar[3]; + * + * We need to visit each array element using the names constructed like + * so: + * + * Blk[0].bar + * Blk[1].bar + * Blk[2].bar + */ + assert(t->is_array()); + const glsl_type *ifc_type = var->get_interface_type(); + char *name = ralloc_strdup(NULL, ifc_type->name); + size_t name_length = strlen(name); + for (unsigned i = 0; i < t->length; i++) { + size_t new_length = name_length; + ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i, + var->name); + /* Note: row_major is only meaningful for uniform blocks, and + * lowering is only applied to non-uniform interface blocks, so we + * can safely pass false for row_major. + */ + recursion(var->type, &name, new_length, row_major, NULL, false); + } + ralloc_free(name); + } else if (var->data.from_named_ifc_block_nonarray) { + /* lower_named_interface_blocks created this variable by lowering a + * named interface block (non-array) to an ordinary variable. 
For + * example if the original source code was: + * + * out Blk { vec4 bar } foo; + * + * Then the variable is now: + * + * out vec4 bar; + * + * We need to visit this variable using the name: + * + * Blk.bar + */ + const glsl_type *ifc_type = var->get_interface_type(); + char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name); + /* Note: row_major is only meaningful for uniform blocks, and lowering + * is only applied to non-uniform interface blocks, so we can safely + * pass false for row_major. + */ + recursion(var->type, &name, strlen(name), row_major, NULL, false); + ralloc_free(name); + } else if (t->without_array()->is_record()) { char *name = ralloc_strdup(NULL, var->name); - recursion(var->type, &name, strlen(name), false); + recursion(var->type, &name, strlen(name), row_major, NULL, false); ralloc_free(name); } else if (t->is_interface()) { char *name = ralloc_strdup(NULL, var->type->name); - recursion(var->type, &name, strlen(name), false); + recursion(var->type, &name, strlen(name), row_major, NULL, false); ralloc_free(name); } else if (t->is_array() && t->fields.array->is_interface()) { char *name = ralloc_strdup(NULL, var->type->fields.array->name); - recursion(var->type, &name, strlen(name), false); + recursion(var->type, &name, strlen(name), row_major, NULL, false); ralloc_free(name); } else { - this->visit_field(t, var->name, false); + this->visit_field(t, var->name, row_major, NULL, false); } } void -uniform_field_visitor::recursion(const glsl_type *t, char **name, - size_t name_length, bool row_major) +program_resource_visitor::recursion(const glsl_type *t, char **name, + size_t name_length, bool row_major, + const glsl_type *record_type, + bool last_field) { /* Records need to have each field processed individually. * @@ -103,6 +166,12 @@ uniform_field_visitor::recursion(const glsl_type *t, char **name, * individually. */ if (t->is_record() || t->is_interface()) { + if (record_type == NULL && t->is_record()) + record_type = t; + + if (t->is_record()) + this->enter_record(t, *name, row_major); + for (unsigned i = 0; i < t->length; i++) { const char *field = t->fields.structure[i].name; size_t new_length = name_length; @@ -110,54 +179,113 @@ uniform_field_visitor::recursion(const glsl_type *t, char **name, if (t->fields.structure[i].type->is_record()) this->visit_field(&t->fields.structure[i]); - /* Append '.field' to the current uniform name. */ + /* Append '.field' to the current variable name. */ if (name_length == 0) { ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field); } else { ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field); } + /* The layout of structures at the top level of the block is set + * during parsing. For matrices contained in multiple levels of + * structures in the block, the inner structures have no layout. + * These cases must potentially inherit the layout from the outer + * levels. + */ + bool field_row_major = row_major; + const enum glsl_matrix_layout matrix_layout = + glsl_matrix_layout(t->fields.structure[i].matrix_layout); + if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) { + field_row_major = true; + } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) { + field_row_major = false; + } + recursion(t->fields.structure[i].type, name, new_length, - t->fields.structure[i].row_major); + field_row_major, + record_type, + (i + 1) == t->length); + + /* Only the first leaf-field of the record gets called with the + * record type pointer. 
+ */ + record_type = NULL; + } + + if (t->is_record()) { + (*name)[name_length] = '\0'; + this->leave_record(t, *name, row_major); } } else if (t->is_array() && (t->fields.array->is_record() || t->fields.array->is_interface())) { + if (record_type == NULL && t->fields.array->is_record()) + record_type = t->fields.array; + for (unsigned i = 0; i < t->length; i++) { size_t new_length = name_length; - /* Append the subscript to the current uniform name */ + /* Append the subscript to the current variable name */ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i); - recursion(t->fields.array, name, new_length, - t->fields.structure[i].row_major); + recursion(t->fields.array, name, new_length, row_major, + record_type, + (i + 1) == t->length); + + /* Only the first leaf-field of the record gets called with the + * record type pointer. + */ + record_type = NULL; } } else { - this->visit_field(t, *name, row_major); + this->visit_field(t, *name, row_major, record_type, last_field); } } void -uniform_field_visitor::visit_field(const glsl_struct_field *field) +program_resource_visitor::visit_field(const glsl_type *type, const char *name, + bool row_major, + const glsl_type *, + bool /* last_field */) +{ + visit_field(type, name, row_major); +} + +void +program_resource_visitor::visit_field(const glsl_struct_field *field) { (void) field; /* empty */ } +void +program_resource_visitor::enter_record(const glsl_type *, const char *, bool) +{ +} + +void +program_resource_visitor::leave_record(const glsl_type *, const char *, bool) +{ +} + +namespace { + /** * Class to help calculate the storage requirements for a set of uniforms * * As uniforms are added to the active set the number of active uniforms and * the storage requirements for those uniforms are accumulated. The active - * uniforms are added the the hash table supplied to the constructor. + * uniforms are added to the hash table supplied to the constructor. * * If the same uniform is added multiple times (i.e., once for each shader * target), it will only be accounted once. 
*/ -class count_uniform_size : public uniform_field_visitor { +class count_uniform_size : public program_resource_visitor { public: count_uniform_size(struct string_to_uint_map *map) : num_active_uniforms(0), num_values(0), num_shader_samplers(0), - num_shader_uniform_components(0), map(map) + num_shader_images(0), num_shader_uniform_components(0), + num_shader_subroutines(0), + is_ubo_var(false), map(map) { /* empty */ } @@ -165,16 +293,19 @@ public: void start_shader() { this->num_shader_samplers = 0; + this->num_shader_images = 0; this->num_shader_uniform_components = 0; + this->num_shader_subroutines = 0; } void process(ir_variable *var) { + this->is_ubo_var = var->is_in_buffer_block(); if (var->is_interface_instance()) - uniform_field_visitor::process(var->interface_type, - var->interface_type->name); + program_resource_visitor::process(var->get_interface_type(), + var->get_interface_type()->name); else - uniform_field_visitor::process(var); + program_resource_visitor::process(var); } /** @@ -192,19 +323,29 @@ public: */ unsigned num_shader_samplers; + /** + * Number of images used + */ + unsigned num_shader_images; + /** * Number of uniforms used in the current shader */ unsigned num_shader_uniform_components; + /** + * Number of subroutine uniforms used + */ + unsigned num_shader_subroutines; + + bool is_ubo_var; + private: virtual void visit_field(const glsl_type *type, const char *name, bool row_major) { - assert(!type->is_record()); - assert(!(type->is_array() && type->fields.array->is_record())); - assert(!type->is_interface()); - assert(!(type->is_array() && type->fields.array->is_interface())); + assert(!type->without_array()->is_record()); + assert(!type->without_array()->is_interface()); (void) row_major; @@ -214,15 +355,26 @@ private: * count it for each shader target. */ const unsigned values = values_for_type(type); - if (type->contains_sampler()) { - this->num_shader_samplers += - type->is_array() ? type->array_size() : 1; + if (type->contains_subroutine()) { + this->num_shader_subroutines += values; + } else if (type->contains_sampler()) { + this->num_shader_samplers += values; + } else if (type->contains_image()) { + this->num_shader_images += values; + + /* As drivers are likely to represent image uniforms as + * scalar indices, count them against the limit of uniform + * components in the default block. The spec allows image + * uniforms to use up no more than one scalar slot. + */ + this->num_shader_uniform_components += values; } else { /* Accumulate the total number of uniform slots used by this shader. * Note that samplers do not count against this limit because they * don't use any storage on current hardware. */ - this->num_shader_uniform_components += values; + if (!is_ubo_var) + this->num_shader_uniform_components += values; } /* If the uniform is already in the map, there's nothing more to do. @@ -243,6 +395,8 @@ private: struct string_to_uint_map *map; }; +} /* anonymous namespace */ + /** * Class to help parcel out pieces of backing storage to uniforms * @@ -258,32 +412,41 @@ private: * the \c gl_uniform_storage and \c gl_constant_value arrays are "big * enough." 
*/ -class parcel_out_uniform_storage : public uniform_field_visitor { +class parcel_out_uniform_storage : public program_resource_visitor { public: parcel_out_uniform_storage(struct string_to_uint_map *map, struct gl_uniform_storage *uniforms, union gl_constant_value *values) - : map(map), uniforms(uniforms), next_sampler(0), values(values) + : map(map), uniforms(uniforms), values(values) { - memset(this->targets, 0, sizeof(this->targets)); } - void start_shader() + void start_shader(gl_shader_stage shader_type) { + assert(shader_type < MESA_SHADER_STAGES); + this->shader_type = shader_type; + this->shader_samplers_used = 0; this->shader_shadow_samplers = 0; + this->next_sampler = 0; + this->next_image = 0; + this->next_subroutine = 0; + memset(this->targets, 0, sizeof(this->targets)); } void set_and_process(struct gl_shader_program *prog, ir_variable *var) { + current_var = var; + field_counter = 0; + ubo_block_index = -1; - if (var->is_in_uniform_block()) { + if (var->is_in_buffer_block()) { if (var->is_interface_instance() && var->type->is_array()) { - unsigned l = strlen(var->interface_type->name); + unsigned l = strlen(var->get_interface_type()->name); for (unsigned i = 0; i < prog->NumUniformBlocks; i++) { - if (strncmp(var->interface_type->name, + if (strncmp(var->get_interface_type()->name, prog->UniformBlocks[i].Name, l) == 0 && prog->UniformBlocks[i].Name[l] == '[') { @@ -293,7 +456,7 @@ public: } } else { for (unsigned i = 0; i < prog->NumUniformBlocks; i++) { - if (strcmp(var->interface_type->name, + if (strcmp(var->get_interface_type()->name, prog->UniformBlocks[i].Name) == 0) { ubo_block_index = i; break; @@ -310,22 +473,21 @@ public: */ if (var->is_interface_instance()) { ubo_byte_offset = 0; - ubo_row_major = false; } else { const struct gl_uniform_block *const block = &prog->UniformBlocks[ubo_block_index]; - assert(var->location != -1); + assert(var->data.location != -1); const struct gl_uniform_buffer_variable *const ubo_var = - &block->Uniforms[var->location]; + &block->Uniforms[var->data.location]; - ubo_row_major = ubo_var->RowMajor; ubo_byte_offset = ubo_var->Offset; } if (var->is_interface_instance()) - process(var->interface_type, var->interface_type->name); + process(var->get_interface_type(), + var->get_interface_type()->name); else process(var); } else @@ -334,18 +496,106 @@ public: int ubo_block_index; int ubo_byte_offset; - bool ubo_row_major; + gl_shader_stage shader_type; private: + void handle_samplers(const glsl_type *base_type, + struct gl_uniform_storage *uniform) + { + if (base_type->is_sampler()) { + uniform->sampler[shader_type].index = this->next_sampler; + uniform->sampler[shader_type].active = true; + + /* Increment the sampler by 1 for non-arrays and by the number of + * array elements for arrays. 
+ */ + this->next_sampler += + MAX2(1, uniform->array_elements); + + const gl_texture_index target = base_type->sampler_index(); + const unsigned shadow = base_type->sampler_shadow; + for (unsigned i = uniform->sampler[shader_type].index; + i < MIN2(this->next_sampler, MAX_SAMPLERS); + i++) { + this->targets[i] = target; + this->shader_samplers_used |= 1U << i; + this->shader_shadow_samplers |= shadow << i; + } + } else { + uniform->sampler[shader_type].index = ~0; + uniform->sampler[shader_type].active = false; + } + } + + void handle_images(const glsl_type *base_type, + struct gl_uniform_storage *uniform) + { + if (base_type->is_image()) { + uniform->image[shader_type].index = this->next_image; + uniform->image[shader_type].active = true; + + /* Increment the image index by 1 for non-arrays and by the + * number of array elements for arrays. + */ + this->next_image += MAX2(1, uniform->array_elements); + + } else { + uniform->image[shader_type].index = ~0; + uniform->image[shader_type].active = false; + } + } + + void handle_subroutines(const glsl_type *base_type, + struct gl_uniform_storage *uniform) + { + if (base_type->is_subroutine()) { + uniform->subroutine[shader_type].index = this->next_subroutine; + uniform->subroutine[shader_type].active = true; + + /* Increment the subroutine index by 1 for non-arrays and by the + * number of array elements for arrays. + */ + this->next_subroutine += MAX2(1, uniform->array_elements); + + } else { + uniform->subroutine[shader_type].index = ~0; + uniform->subroutine[shader_type].active = false; + } + } + virtual void visit_field(const glsl_type *type, const char *name, bool row_major) { - assert(!type->is_record()); - assert(!(type->is_array() && type->fields.array->is_record())); - assert(!type->is_interface()); - assert(!(type->is_array() && type->fields.array->is_interface())); - + (void) type; + (void) name; (void) row_major; + assert(!"Should not get here."); + } + + virtual void enter_record(const glsl_type *type, const char *, + bool row_major) { + assert(type->is_record()); + if (this->ubo_block_index == -1) + return; + this->ubo_byte_offset = glsl_align( + this->ubo_byte_offset, type->std140_base_alignment(row_major)); + } + + virtual void leave_record(const glsl_type *type, const char *, + bool row_major) { + assert(type->is_record()); + if (this->ubo_block_index == -1) + return; + this->ubo_byte_offset = glsl_align( + this->ubo_byte_offset, type->std140_base_alignment(row_major)); + } + + virtual void visit_field(const glsl_type *type, const char *name, + bool row_major, const glsl_type *record_type, + bool /* last_field */) + { + assert(!type->without_array()->is_record()); + assert(!type->without_array()->is_interface()); unsigned id; bool found = this->map->get(id, name); @@ -354,31 +604,6 @@ private: if (!found) return; - /* If there is already storage associated with this uniform, it means - * that it was set while processing an earlier shader stage. For - * example, we may be processing the uniform in the fragment shader, but - * the uniform was already processed in the vertex shader. - */ - if (this->uniforms[id].storage != NULL) { - /* If the uniform already has storage set from another shader stage, - * mark the samplers used for this shader stage. - */ - if (type->contains_sampler()) { - const unsigned count = MAX2(1, this->uniforms[id].array_elements); - const unsigned shadow = (type->is_array()) - ? 
type->fields.array->sampler_shadow : type->sampler_shadow; - - for (unsigned i = 0; i < count; i++) { - const unsigned s = this->uniforms[id].sampler + i; - - this->shader_samplers_used |= 1U << s; - this->shader_shadow_samplers |= shadow << s; - } - } - - return; - } - const glsl_type *base_type; if (type->is_array()) { this->uniforms[id].array_elements = type->length; @@ -388,26 +613,35 @@ private: base_type = type; } - if (base_type->is_sampler()) { - this->uniforms[id].sampler = this->next_sampler; + /* This assigns uniform indices to sampler and image uniforms. */ + handle_samplers(base_type, &this->uniforms[id]); + handle_images(base_type, &this->uniforms[id]); + handle_subroutines(base_type, &this->uniforms[id]); - /* Increment the sampler by 1 for non-arrays and by the number of - * array elements for arrays. - */ - this->next_sampler += MAX2(1, this->uniforms[id].array_elements); - - const gl_texture_index target = base_type->sampler_index(); - const unsigned shadow = base_type->sampler_shadow; - for (unsigned i = this->uniforms[id].sampler - ; i < MIN2(this->next_sampler, MAX_SAMPLERS) - ; i++) { - this->targets[i] = target; - this->shader_samplers_used |= 1U << i; - this->shader_shadow_samplers |= shadow << i; - } + /* If there is already storage associated with this uniform or if the + * uniform is set as builtin, it means that it was set while processing + * an earlier shader stage. For example, we may be processing the + * uniform in the fragment shader, but the uniform was already processed + * in the vertex shader. + */ + if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) { + return; + } + /* Assign explicit locations. */ + if (current_var->data.explicit_location) { + /* Set sequential locations for struct fields. 
*/ + if (record_type != NULL) { + const unsigned entries = MAX2(1, this->uniforms[id].array_elements); + this->uniforms[id].remap_location = + current_var->data.location + field_counter; + field_counter += entries; + } else { + this->uniforms[id].remap_location = current_var->data.location; + } } else { - this->uniforms[id].sampler = ~0; + /* Initialize to to indicate that no location is set */ + this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC; } this->uniforms[id].name = ralloc_strdup(this->uniforms, name); @@ -415,26 +649,38 @@ private: this->uniforms[id].initialized = 0; this->uniforms[id].num_driver_storage = 0; this->uniforms[id].driver_storage = NULL; - this->uniforms[id].storage = this->values; + this->uniforms[id].atomic_buffer_index = -1; + this->uniforms[id].hidden = + current_var->data.how_declared == ir_var_hidden; + this->uniforms[id].builtin = is_gl_identifier(name); + + /* Do not assign storage if the uniform is builtin */ + if (!this->uniforms[id].builtin) + this->uniforms[id].storage = this->values; + if (this->ubo_block_index != -1) { this->uniforms[id].block_index = this->ubo_block_index; - unsigned alignment = type->std140_base_alignment(ubo_row_major); + const unsigned alignment = type->std140_base_alignment(row_major); this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment); this->uniforms[id].offset = this->ubo_byte_offset; - this->ubo_byte_offset += type->std140_size(ubo_row_major); + this->ubo_byte_offset += type->std140_size(row_major); if (type->is_array()) { this->uniforms[id].array_stride = - glsl_align(type->fields.array->std140_size(ubo_row_major), 16); + glsl_align(type->fields.array->std140_size(row_major), 16); } else { this->uniforms[id].array_stride = 0; } - if (type->is_matrix() || - (type->is_array() && type->fields.array->is_matrix())) { - this->uniforms[id].matrix_stride = 16; - this->uniforms[id].row_major = ubo_row_major; + if (type->without_array()->is_matrix()) { + const glsl_type *matrix = type->without_array(); + const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4; + const unsigned items = row_major ? matrix->matrix_columns : matrix->vector_elements; + + assert(items <= 4); + this->uniforms[id].matrix_stride = glsl_align(items * N, 16); + this->uniforms[id].row_major = row_major; } else { this->uniforms[id].matrix_stride = 0; this->uniforms[id].row_major = false; @@ -454,12 +700,25 @@ private: struct gl_uniform_storage *uniforms; unsigned next_sampler; + unsigned next_image; + unsigned next_subroutine; public: union gl_constant_value *values; gl_texture_index targets[MAX_SAMPLERS]; + /** + * Current variable being processed. + */ + ir_variable *current_var; + + /** + * Field counter is used to take care that uniform structures + * with explicit locations get sequential locations. + */ + unsigned field_counter; + /** * Mask of samplers used by the current shader stage. 
*/ @@ -531,16 +790,17 @@ link_cross_validate_uniform_block(void *mem_ctx, static void link_update_uniform_buffer_variables(struct gl_shader *shader) { - foreach_list(node, shader->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, shader->ir) { + ir_variable *const var = node->as_variable(); - if ((var == NULL) || !var->is_in_uniform_block()) + if ((var == NULL) || !var->is_in_buffer_block()) continue; - assert(var->mode == ir_var_uniform); + assert(var->data.mode == ir_var_uniform || + var->data.mode == ir_var_shader_storage); if (var->is_interface_instance()) { - var->location = 0; + var->data.location = 0; continue; } @@ -564,18 +824,18 @@ link_update_uniform_buffer_variables(struct gl_shader *shader) if (end == NULL) continue; - if (l != (end - begin)) + if ((ptrdiff_t) l != (end - begin)) continue; if (strncmp(var->name, begin, l) == 0) { found = true; - var->location = j; + var->data.location = j; break; } } else if (!strcmp(var->name, shader->UniformBlocks[i].Uniforms[j].Name)) { found = true; - var->location = j; + var->data.location = j; break; } } @@ -586,46 +846,92 @@ link_update_uniform_buffer_variables(struct gl_shader *shader) } } -void -link_assign_uniform_block_offsets(struct gl_shader *shader) +/** + * Scan the program for image uniforms and store image unit access + * information into the gl_shader data structure. + */ +static void +link_set_image_access_qualifiers(struct gl_shader_program *prog) { - for (unsigned b = 0; b < shader->NumUniformBlocks; b++) { - struct gl_uniform_block *block = &shader->UniformBlocks[b]; + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { + gl_shader *sh = prog->_LinkedShaders[i]; + + if (sh == NULL) + continue; - unsigned offset = 0; - for (unsigned int i = 0; i < block->NumUniforms; i++) { - struct gl_uniform_buffer_variable *ubo_var = &block->Uniforms[i]; - const struct glsl_type *type = ubo_var->Type; + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *var = node->as_variable(); + + if (var && var->data.mode == ir_var_uniform && + var->type->contains_image()) { + unsigned id = 0; + bool found = prog->UniformHash->get(id, var->name); + assert(found); + (void) found; + const gl_uniform_storage *storage = &prog->UniformStorage[id]; + const unsigned index = storage->image[i].index; + const GLenum access = (var->data.image_read_only ? GL_READ_ONLY : + var->data.image_write_only ? GL_WRITE_ONLY : + GL_READ_WRITE); + + for (unsigned j = 0; j < MAX2(1, storage->array_elements); ++j) + sh->ImageAccess[index + j] = access; + } + } + } +} - unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor); - unsigned size = type->std140_size(ubo_var->RowMajor); +/** + * Sort the array of uniform storage so that the non-hidden uniforms are first + * + * This function sorts the list "in place." This is important because some of + * the storage accessible from \c uniforms has \c uniforms as its \c ralloc + * context. If \c uniforms is freed, some other storage will also be freed. + */ +static unsigned +move_hidden_uniforms_to_end(struct gl_shader_program *prog, + struct gl_uniform_storage *uniforms, + unsigned num_elements) +{ + struct gl_uniform_storage *sorted_uniforms = + ralloc_array(prog, struct gl_uniform_storage, num_elements); + unsigned hidden_uniforms = 0; + unsigned j = 0; + + /* Add the non-hidden uniforms. 
*/ + for (unsigned i = 0; i < num_elements; i++) { + if (!uniforms[i].hidden) + sorted_uniforms[j++] = uniforms[i]; + } - offset = glsl_align(offset, alignment); - ubo_var->Offset = offset; - offset += size; + /* Add and count the hidden uniforms. */ + for (unsigned i = 0; i < num_elements; i++) { + if (uniforms[i].hidden) { + sorted_uniforms[j++] = uniforms[i]; + hidden_uniforms++; } + } - /* From the GL_ARB_uniform_buffer_object spec: - * - * "For uniform blocks laid out according to [std140] rules, - * the minimum buffer object size returned by the - * UNIFORM_BLOCK_DATA_SIZE query is derived by taking the - * offset of the last basic machine unit consumed by the - * last uniform of the uniform block (including any - * end-of-array or end-of-structure padding), adding one, - * and rounding up to the next multiple of the base - * alignment required for a vec4." - */ - block->UniformBufferSize = glsl_align(offset, 16); + assert(prog->UniformHash != NULL); + prog->UniformHash->clear(); + for (unsigned i = 0; i < num_elements; i++) { + if (sorted_uniforms[i].name != NULL) + prog->UniformHash->put(i, sorted_uniforms[i].name); } + + memcpy(uniforms, sorted_uniforms, sizeof(uniforms[0]) * num_elements); + ralloc_free(sorted_uniforms); + + return hidden_uniforms; } void -link_assign_uniform_locations(struct gl_shader_program *prog) +link_assign_uniform_locations(struct gl_shader_program *prog, + unsigned int boolean_true) { ralloc_free(prog->UniformStorage); prog->UniformStorage = NULL; - prog->NumUserUniformStorage = 0; + prog->NumUniformStorage = 0; if (prog->UniformHash != NULL) { prog->UniformHash->clear(); @@ -633,17 +939,6 @@ link_assign_uniform_locations(struct gl_shader_program *prog) prog->UniformHash = new string_to_uint_map; } - /* Uniforms that lack an initializer in the shader code have an initial - * value of zero. This includes sampler uniforms. - * - * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says: - * - * "The link time initial value is either the value of the variable's - * initializer, if present, or 0 if no initializer is present. Sampler - * types cannot have initializers." - */ - memset(prog->SamplerUnits, 0, sizeof(prog->SamplerUnits)); - /* First pass: Count the uniform resources used by the user-defined * uniforms. While this happens, each active uniform will have an index * assigned to it. @@ -652,48 +947,61 @@ link_assign_uniform_locations(struct gl_shader_program *prog) * glGetUniformLocation. */ count_uniform_size uniform_size(prog->UniformHash); - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { - if (prog->_LinkedShaders[i] == NULL) + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { + struct gl_shader *sh = prog->_LinkedShaders[i]; + + if (sh == NULL) continue; - link_update_uniform_buffer_variables(prog->_LinkedShaders[i]); + /* Uniforms that lack an initializer in the shader code have an initial + * value of zero. This includes sampler uniforms. + * + * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says: + * + * "The link time initial value is either the value of the variable's + * initializer, if present, or 0 if no initializer is present. Sampler + * types cannot have initializers." + */ + memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits)); + memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits)); + + link_update_uniform_buffer_variables(sh); /* Reset various per-shader target counts. 
*/ uniform_size.start_shader(); - foreach_list(node, prog->_LinkedShaders[i]->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *const var = node->as_variable(); - if ((var == NULL) || (var->mode != ir_var_uniform)) + if ((var == NULL) || (var->data.mode != ir_var_uniform && + var->data.mode != ir_var_shader_storage)) continue; - /* FINISHME: Update code to process built-in uniforms! - */ - if (strncmp("gl_", var->name, 3) == 0) { - uniform_size.num_shader_uniform_components += - var->type->component_slots(); - continue; - } - uniform_size.process(var); } - prog->_LinkedShaders[i]->num_samplers = uniform_size.num_shader_samplers; - prog->_LinkedShaders[i]->num_uniform_components = - uniform_size.num_shader_uniform_components; + sh->num_samplers = uniform_size.num_shader_samplers; + sh->NumImages = uniform_size.num_shader_images; + sh->num_uniform_components = uniform_size.num_shader_uniform_components; + sh->num_combined_uniform_components = sh->num_uniform_components; + + for (unsigned i = 0; i < sh->NumUniformBlocks; i++) { + sh->num_combined_uniform_components += + sh->UniformBlocks[i].UniformBufferSize / 4; + } } - const unsigned num_user_uniforms = uniform_size.num_active_uniforms; + const unsigned num_uniforms = uniform_size.num_active_uniforms; const unsigned num_data_slots = uniform_size.num_values; /* On the outside chance that there were no uniforms, bail out. */ - if (num_user_uniforms == 0) + if (num_uniforms == 0) return; struct gl_uniform_storage *uniforms = - rzalloc_array(prog, struct gl_uniform_storage, num_user_uniforms); + rzalloc_array(prog, struct gl_uniform_storage, num_uniforms); union gl_constant_value *data = rzalloc_array(uniforms, union gl_constant_value, num_data_slots); #ifndef NDEBUG @@ -702,23 +1010,16 @@ link_assign_uniform_locations(struct gl_shader_program *prog) parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data); - for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) { + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { if (prog->_LinkedShaders[i] == NULL) continue; - /* Reset various per-shader target counts. - */ - parcel.start_shader(); - - foreach_list(node, prog->_LinkedShaders[i]->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + parcel.start_shader((gl_shader_stage)i); - if ((var == NULL) || (var->mode != ir_var_uniform)) - continue; + foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) { + ir_variable *const var = node->as_variable(); - /* FINISHME: Update code to process built-in uniforms! - */ - if (strncmp("gl_", var->name, 3) == 0) + if ((var == NULL) || (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage)) continue; parcel.set_and_process(prog, var); @@ -726,23 +1027,140 @@ link_assign_uniform_locations(struct gl_shader_program *prog) prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used; prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers; + + STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) == sizeof(parcel.targets)); + memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets, + sizeof(prog->_LinkedShaders[i]->SamplerTargets)); + } + + const unsigned hidden_uniforms = + move_hidden_uniforms_to_end(prog, uniforms, num_uniforms); + + /* Reserve all the explicit locations of the active uniforms. 
*/ + for (unsigned i = 0; i < num_uniforms; i++) { + if (uniforms[i].type->is_subroutine()) + continue; + + if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) { + /* How many new entries for this uniform? */ + const unsigned entries = MAX2(1, uniforms[i].array_elements); + + /* Set remap table entries point to correct gl_uniform_storage. */ + for (unsigned j = 0; j < entries; j++) { + unsigned element_loc = uniforms[i].remap_location + j; + assert(prog->UniformRemapTable[element_loc] == + INACTIVE_UNIFORM_EXPLICIT_LOCATION); + prog->UniformRemapTable[element_loc] = &uniforms[i]; + } + } + } + + /* Reserve locations for rest of the uniforms. */ + for (unsigned i = 0; i < num_uniforms; i++) { + + if (uniforms[i].type->is_subroutine()) + continue; + /* Built-in uniforms should not get any location. */ + if (uniforms[i].builtin) + continue; + + /* Explicit ones have been set already. */ + if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) + continue; + + /* how many new entries for this uniform? */ + const unsigned entries = MAX2(1, uniforms[i].array_elements); + + /* resize remap table to fit new entries */ + prog->UniformRemapTable = + reralloc(prog, + prog->UniformRemapTable, + gl_uniform_storage *, + prog->NumUniformRemapTable + entries); + + /* set pointers for this uniform */ + for (unsigned j = 0; j < entries; j++) + prog->UniformRemapTable[prog->NumUniformRemapTable+j] = &uniforms[i]; + + /* set the base location in remap table for the uniform */ + uniforms[i].remap_location = prog->NumUniformRemapTable; + + prog->NumUniformRemapTable += entries; } - assert(sizeof(prog->SamplerTargets) == sizeof(parcel.targets)); - memcpy(prog->SamplerTargets, parcel.targets, sizeof(prog->SamplerTargets)); + /* Reserve all the explicit locations of the active subroutine uniforms. */ + for (unsigned i = 0; i < num_uniforms; i++) { + if (!uniforms[i].type->is_subroutine()) + continue; + + if (uniforms[i].remap_location == UNMAPPED_UNIFORM_LOC) + continue; + + for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) { + struct gl_shader *sh = prog->_LinkedShaders[j]; + if (!sh) + continue; + + if (!uniforms[i].subroutine[j].active) + continue; + + /* How many new entries for this uniform? */ + const unsigned entries = MAX2(1, uniforms[i].array_elements); + + /* Set remap table entries point to correct gl_uniform_storage. 
*/ + for (unsigned k = 0; k < entries; k++) { + unsigned element_loc = uniforms[i].remap_location + k; + assert(sh->SubroutineUniformRemapTable[element_loc] == + INACTIVE_UNIFORM_EXPLICIT_LOCATION); + sh->SubroutineUniformRemapTable[element_loc] = &uniforms[i]; + } + } + } + + /* reserve subroutine locations */ + for (unsigned i = 0; i < num_uniforms; i++) { + + if (!uniforms[i].type->is_subroutine()) + continue; + const unsigned entries = MAX2(1, uniforms[i].array_elements); + + if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) + continue; + for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) { + struct gl_shader *sh = prog->_LinkedShaders[j]; + if (!sh) + continue; + + if (!uniforms[i].subroutine[j].active) + continue; + + sh->SubroutineUniformRemapTable = + reralloc(sh, + sh->SubroutineUniformRemapTable, + gl_uniform_storage *, + sh->NumSubroutineUniformRemapTable + entries); + + for (unsigned k = 0; k < entries; k++) + sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] = &uniforms[i]; + uniforms[i].remap_location = sh->NumSubroutineUniformRemapTable; + sh->NumSubroutineUniformRemapTable += entries; + } + } #ifndef NDEBUG - for (unsigned i = 0; i < num_user_uniforms; i++) { - assert(uniforms[i].storage != NULL); + for (unsigned i = 0; i < num_uniforms; i++) { + assert(uniforms[i].storage != NULL || uniforms[i].builtin); } assert(parcel.values == data_end); #endif - prog->NumUserUniformStorage = num_user_uniforms; + prog->NumUniformStorage = num_uniforms; + prog->NumHiddenUniforms = hidden_uniforms; prog->UniformStorage = uniforms; - link_set_uniform_initializers(prog); + link_set_image_access_qualifiers(prog); + link_set_uniform_initializers(prog, boolean_true); return; }
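
The from_named_ifc_block_array path near the top of the patch visits one name per element of the original interface-block array ("Blk[0].bar", "Blk[1].bar", ...), built by rewriting the tail of a ralloc'd string. The standalone C++ sketch below is not Mesa code: it uses plain std::snprintf in place of ralloc_asprintf_rewrite_tail, and the names Blk, bar and the array size 3 are taken from the example in the patch's own comment.

/* Illustrative only: rebuild the per-element names the visitor generates
 * for a lowered interface-block array such as "out Blk { vec4 bar } foo[3];". */
#include <cstdio>

int main()
{
   const char *block_name = "Blk";   /* stands in for var->get_interface_type()->name */
   const char *field_name = "bar";   /* stands in for var->name after lowering        */
   const unsigned length = 3;        /* array size of the original block              */

   for (unsigned i = 0; i < length; i++) {
      char name[64];
      std::snprintf(name, sizeof(name), "%s[%u].%s", block_name, i, field_name);
      std::printf("%s\n", name);     /* prints Blk[0].bar, Blk[1].bar, Blk[2].bar */
   }
   return 0;
}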
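
For matrices inside uniform blocks, the patch replaces the old hard-coded matrix_stride of 16 with glsl_align(items * N, 16), where N is 8 bytes for double-precision matrices and 4 otherwise, and items is the component count of one column (column-major) or one row (row-major). A minimal sketch of that arithmetic follows, with align_to standing in for glsl_align (an illustrative helper name; a power-of-two alignment is assumed).

#include <cassert>
#include <cstdio>

/* Round offset up to the next multiple of a power-of-two alignment,
 * the same job glsl_align() performs in the linker. */
static unsigned align_to(unsigned offset, unsigned alignment)
{
   return (offset + alignment - 1) & ~(alignment - 1);
}

/* items: components in one column (column-major) or one row (row-major).
 * is_double: true for dmat* types (8-byte scalars), false for mat* (4-byte). */
static unsigned std140_matrix_stride(unsigned items, bool is_double)
{
   const unsigned N = is_double ? 8 : 4;
   assert(items <= 4);
   return align_to(items * N, 16);
}

int main()
{
   std::printf("mat3,  column-major: %u\n", std140_matrix_stride(3, false)); /* 16 */
   std::printf("dmat3, column-major: %u\n", std140_matrix_stride(3, true));  /* 32 */
   std::printf("dmat4, column-major: %u\n", std140_matrix_stride(4, true));  /* 32 */
   return 0;
}

A plain mat3 therefore keeps the stride of 16 that the old code hard-wired, while double-precision matrices can now need 32, which is the reason for the GLSL_TYPE_DOUBLE check in the patch.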
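
The UniformRemapTable handling added at the end of link_assign_uniform_locations works in two passes: slots claimed by layout(location=...) are reserved first (their remap_location was filled in earlier from current_var->data.location plus field_counter), and every remaining non-built-in uniform is appended afterwards and told the base location it received, with UNMAPPED_UNIFORM_LOC marking "no explicit location yet". The sketch below is a simplification, not the Mesa implementation: uniform_info stands in for gl_uniform_storage, a std::vector replaces the reralloc'd table, and the INACTIVE_UNIFORM_EXPLICIT_LOCATION pre-fill done during parsing is approximated by null entries.

#include <cassert>
#include <vector>

struct uniform_info {
   unsigned remap_location;   /* UNMAPPED until a location is assigned       */
   unsigned array_elements;   /* 0 for non-arrays, as in gl_uniform_storage  */
   bool     builtin;          /* gl_* uniforms never get a location          */
};

static const unsigned UNMAPPED = ~0u;   /* stand-in for UNMAPPED_UNIFORM_LOC */

static void build_remap_table(std::vector<uniform_info> &uniforms,
                              std::vector<const uniform_info *> &table)
{
   /* Pass 1: uniforms with an explicit location already know their slots;
    * just make sure nothing else has claimed them. */
   for (const uniform_info &u : uniforms) {
      if (u.builtin || u.remap_location == UNMAPPED)
         continue;
      const unsigned entries = u.array_elements ? u.array_elements : 1;
      if (table.size() < u.remap_location + entries)
         table.resize(u.remap_location + entries, nullptr);
      for (unsigned j = 0; j < entries; j++) {
         assert(table[u.remap_location + j] == nullptr);
         table[u.remap_location + j] = &u;
      }
   }

   /* Pass 2: everything else is appended after the reserved slots and is
    * told the base index it received. */
   for (uniform_info &u : uniforms) {
      if (u.builtin || u.remap_location != UNMAPPED)
         continue;
      const unsigned entries = u.array_elements ? u.array_elements : 1;
      u.remap_location = (unsigned) table.size();
      for (unsigned j = 0; j < entries; j++)
         table.push_back(&u);
   }
}

Keeping the explicit pass separate guarantees that a location chosen by the application can never be taken by the automatic assignment that follows, which mirrors why the patch reserves explicit locations before growing the table for the remaining uniforms.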