#include "glsl_symbol_table.h"
#include "program/hash_table.h"
#include "program.h"
-
-static inline unsigned int
-align(unsigned int a, unsigned int align)
-{
- return (a + align - 1) / align * align;
-}
+#include "util/hash_table.h"
/**
* \file link_uniforms.cpp
* \author Ian Romanick <ian.d.romanick@intel.com>
*/
+/**
+ * Used by linker to indicate uniforms that have no location set.
+ */
+#define UNMAPPED_UNIFORM_LOC ~0u
+
/**
* Count the backing storage requirements for a type
*/
}
void
-uniform_field_visitor::process(ir_variable *var)
+program_resource_visitor::process(const glsl_type *type, const char *name)
{
+ assert(type->without_array()->is_record()
+ || type->without_array()->is_interface());
+
+ unsigned record_array_count = 1;
+ char *name_copy = ralloc_strdup(NULL, name);
+ unsigned packing = type->interface_packing;
+
+ recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
+ record_array_count);
+ ralloc_free(name_copy);
+}
+
+void
+program_resource_visitor::process(ir_variable *var)
+{
+ unsigned record_array_count = 1;
const glsl_type *t = var->type;
+ const glsl_type *t_without_array = var->type->without_array();
+ const bool row_major =
+ var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+
+ const unsigned packing = var->get_interface_type() ?
+ var->get_interface_type()->interface_packing :
+ var->type->interface_packing;
+
+ /* false is always passed for the row_major parameter to the other
+ * processing functions because no information is available to do
+ * otherwise. See the warning in linker.h.
+ */
/* Only strdup the name if we actually will need to modify it. */
- if (t->is_record() || (t->is_array() && t->fields.array->is_record())) {
+ if (var->data.from_named_ifc_block_array) {
+ /* lower_named_interface_blocks created this variable by lowering an
+ * interface block array to an array variable. For example if the
+ * original source code was:
+ *
+ * out Blk { vec4 bar } foo[3];
+ *
+ * Then the variable is now:
+ *
+ * out vec4 bar[3];
+ *
+ * We need to visit each array element using the names constructed like
+ * so:
+ *
+ * Blk[0].bar
+ * Blk[1].bar
+ * Blk[2].bar
+ */
+ assert(t->is_array());
+ const glsl_type *ifc_type = var->get_interface_type();
+ char *name = ralloc_strdup(NULL, ifc_type->name);
+ size_t name_length = strlen(name);
+ for (unsigned i = 0; i < t->length; i++) {
+ size_t new_length = name_length;
+ ralloc_asprintf_rewrite_tail(&name, &new_length, "[%u].%s", i,
+ var->name);
+ /* Note: row_major is only meaningful for uniform blocks, and
+ * lowering is only applied to non-uniform interface blocks, so we
+ * can safely pass false for row_major.
+ */
+ recursion(var->type, &name, new_length, row_major, NULL, packing,
+ false, record_array_count);
+ }
+ ralloc_free(name);
+ } else if (var->data.from_named_ifc_block_nonarray) {
+ /* lower_named_interface_blocks created this variable by lowering a
+ * named interface block (non-array) to an ordinary variable. For
+ * example if the original source code was:
+ *
+ * out Blk { vec4 bar } foo;
+ *
+ * Then the variable is now:
+ *
+ * out vec4 bar;
+ *
+ * We need to visit this variable using the name:
+ *
+ * Blk.bar
+ */
+ const glsl_type *ifc_type = var->get_interface_type();
+ char *name = ralloc_asprintf(NULL, "%s.%s", ifc_type->name, var->name);
+ /* Note: row_major is only meaningful for uniform blocks, and lowering
+ * is only applied to non-uniform interface blocks, so we can safely
+ * pass false for row_major.
+ */
+ recursion(var->type, &name, strlen(name), row_major, NULL, packing,
+ false, record_array_count);
+ ralloc_free(name);
+ } else if (t_without_array->is_record() ||
+ (t->is_array() && t->fields.array->is_array())) {
char *name = ralloc_strdup(NULL, var->name);
- recursion(var->type, &name, strlen(name));
+ recursion(var->type, &name, strlen(name), row_major, NULL, packing,
+ false, record_array_count);
+ ralloc_free(name);
+ } else if (t_without_array->is_interface()) {
+ char *name = ralloc_strdup(NULL, t_without_array->name);
+ recursion(var->type, &name, strlen(name), row_major, NULL, packing,
+ false, record_array_count);
ralloc_free(name);
} else {
- this->visit_field(t, var->name);
+ this->set_record_array_count(record_array_count);
+ this->visit_field(t, var->name, row_major, NULL, packing, false);
}
}
void
-uniform_field_visitor::recursion(const glsl_type *t, char **name,
- size_t name_length)
+program_resource_visitor::recursion(const glsl_type *t, char **name,
+ size_t name_length, bool row_major,
+ const glsl_type *record_type,
+ const unsigned packing,
+ bool last_field,
+ unsigned record_array_count)
{
/* Records need to have each field processed individually.
*
* individually, then each field of the resulting array elements processed
* individually.
*/
- if (t->is_record()) {
+ if (t->is_record() || t->is_interface()) {
+ if (record_type == NULL && t->is_record())
+ record_type = t;
+
+ if (t->is_record())
+ this->enter_record(t, *name, row_major, packing);
+
for (unsigned i = 0; i < t->length; i++) {
const char *field = t->fields.structure[i].name;
size_t new_length = name_length;
- /* Append '.field' to the current uniform name. */
- ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
+ if (t->fields.structure[i].type->is_record())
+ this->visit_field(&t->fields.structure[i]);
+
+ /* Append '.field' to the current variable name. */
+ if (name_length == 0) {
+ ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
+ } else {
+ ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
+ }
+
+ /* The layout of structures at the top level of the block is set
+ * during parsing. For matrices contained in multiple levels of
+ * structures in the block, the inner structures have no layout.
+ * These cases must potentially inherit the layout from the outer
+ * levels.
+ */
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(t->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ recursion(t->fields.structure[i].type, name, new_length,
+ field_row_major,
+ record_type,
+ packing,
+ (i + 1) == t->length, record_array_count);
+
+ /* Only the first leaf-field of the record gets called with the
+ * record type pointer.
+ */
+ record_type = NULL;
+ }
- recursion(t->fields.structure[i].type, name, new_length);
+ if (t->is_record()) {
+ (*name)[name_length] = '\0';
+ this->leave_record(t, *name, row_major, packing);
}
- } else if (t->is_array() && t->fields.array->is_record()) {
- for (unsigned i = 0; i < t->length; i++) {
+ } else if (t->without_array()->is_record() ||
+ t->without_array()->is_interface() ||
+ (t->is_array() && t->fields.array->is_array())) {
+ if (record_type == NULL && t->fields.array->is_record())
+ record_type = t->fields.array;
+
+ unsigned length = t->length;
+ /* Shader storage block unsized arrays: add subscript [0] to variable
+ * names */
+ if (t->is_unsized_array())
+ length = 1;
+
+ record_array_count *= length;
+
+ for (unsigned i = 0; i < length; i++) {
size_t new_length = name_length;
- /* Append the subscript to the current uniform name */
+ /* Append the subscript to the current variable name */
ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
- recursion(t->fields.array, name, new_length);
+ recursion(t->fields.array, name, new_length, row_major,
+ record_type,
+ packing,
+ (i + 1) == t->length, record_array_count);
+
+ /* Only the first leaf-field of the record gets called with the
+ * record type pointer.
+ */
+ record_type = NULL;
}
} else {
- this->visit_field(t, *name);
+ this->set_record_array_count(record_array_count);
+ this->visit_field(t, *name, row_major, record_type, packing, last_field);
}
}
+void
+program_resource_visitor::visit_field(const glsl_type *type, const char *name,
+ bool row_major,
+ const glsl_type *,
+ const unsigned,
+ bool /* last_field */)
+{
+ visit_field(type, name, row_major);
+}
+
+void
+program_resource_visitor::visit_field(const glsl_struct_field *field)
+{
+ (void) field;
+ /* empty */
+}
+
+void
+program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
+ const unsigned)
+{
+}
+
+void
+program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
+ const unsigned)
+{
+}
+
+void
+program_resource_visitor::set_record_array_count(unsigned)
+{
+}
+
+namespace {
+
/**
* Class to help calculate the storage requirements for a set of uniforms
*
* As uniforms are added to the active set the number of active uniforms and
* the storage requirements for those uniforms are accumulated. The active
- * uniforms are added the the hash table supplied to the constructor.
+ * uniforms are added to the hash table supplied to the constructor.
*
* If the same uniform is added multiple times (i.e., once for each shader
* target), it will only be accounted once.
*/
-class count_uniform_size : public uniform_field_visitor {
+class count_uniform_size : public program_resource_visitor {
public:
- count_uniform_size(struct string_to_uint_map *map)
- : num_active_uniforms(0), num_values(0), num_shader_samplers(0),
- num_shader_uniform_components(0), map(map)
+ count_uniform_size(struct string_to_uint_map *map,
+ struct string_to_uint_map *hidden_map)
+ : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
+ num_shader_samplers(0), num_shader_images(0),
+ num_shader_uniform_components(0), num_shader_subroutines(0),
+ is_ubo_var(false), is_shader_storage(false), map(map),
+ hidden_map(hidden_map)
{
/* empty */
}
void start_shader()
{
this->num_shader_samplers = 0;
+ this->num_shader_images = 0;
this->num_shader_uniform_components = 0;
+ this->num_shader_subroutines = 0;
+ }
+
+ void process(ir_variable *var)
+ {
+ this->current_var = var;
+ this->is_ubo_var = var->is_in_buffer_block();
+ this->is_shader_storage = var->is_in_shader_storage_block();
+ if (var->is_interface_instance())
+ program_resource_visitor::process(var->get_interface_type(),
+ var->get_interface_type()->name);
+ else
+ program_resource_visitor::process(var);
}
/**
*/
unsigned num_active_uniforms;
+ unsigned num_hidden_uniforms;
+
/**
* Number of data values required to back the storage for the active uniforms
*/
*/
unsigned num_shader_samplers;
+ /**
+ * Number of images used
+ */
+ unsigned num_shader_images;
+
/**
* Number of uniforms used in the current shader
*/
unsigned num_shader_uniform_components;
+ /**
+ * Number of subroutine uniforms used
+ */
+ unsigned num_shader_subroutines;
+
+ bool is_ubo_var;
+ bool is_shader_storage;
+
+ struct string_to_uint_map *map;
+
private:
- virtual void visit_field(const glsl_type *type, const char *name)
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major)
{
- assert(!type->is_record());
- assert(!(type->is_array() && type->fields.array->is_record()));
+ assert(!type->without_array()->is_record());
+ assert(!type->without_array()->is_interface());
+ assert(!(type->is_array() && type->fields.array->is_array()));
+
+ (void) row_major;
/* Count the number of samplers regardless of whether the uniform is
* already in the hash table. The hash table prevents adding the same
* count it for each shader target.
*/
const unsigned values = values_for_type(type);
- if (type->contains_sampler()) {
- this->num_shader_samplers +=
- type->is_array() ? type->array_size() : 1;
+ if (type->contains_subroutine()) {
+ this->num_shader_subroutines += values;
+ } else if (type->contains_sampler()) {
+ this->num_shader_samplers += values;
+ } else if (type->contains_image()) {
+ this->num_shader_images += values;
+
+ /* As drivers are likely to represent image uniforms as
+ * scalar indices, count them against the limit of uniform
+ * components in the default block. The spec allows image
+ * uniforms to use up no more than one scalar slot.
+ */
+ if(!is_shader_storage)
+ this->num_shader_uniform_components += values;
} else {
/* Accumulate the total number of uniform slots used by this shader.
* Note that samplers do not count against this limit because they
* don't use any storage on current hardware.
*/
- this->num_shader_uniform_components += values;
+ if (!is_ubo_var && !is_shader_storage)
+ this->num_shader_uniform_components += values;
}
/* If the uniform is already in the map, there's nothing more to do.
if (this->map->get(id, name))
return;
- this->map->put(this->num_active_uniforms, name);
+ if (this->current_var->data.how_declared == ir_var_hidden) {
+ this->hidden_map->put(this->num_hidden_uniforms, name);
+ this->num_hidden_uniforms++;
+ } else {
+ this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
+ name);
+ }
/* Each leaf uniform occupies one entry in the list of active
* uniforms.
this->num_values += values;
}
- struct string_to_uint_map *map;
+ struct string_to_uint_map *hidden_map;
+
+ /**
+ * Current variable being processed.
+ */
+ ir_variable *current_var;
};
+} /* anonymous namespace */
+
/**
* Class to help parcel out pieces of backing storage to uniforms
*
* the \c gl_uniform_storage and \c gl_constant_value arrays are "big
* enough."
*/
-class parcel_out_uniform_storage : public uniform_field_visitor {
+class parcel_out_uniform_storage : public program_resource_visitor {
public:
parcel_out_uniform_storage(struct string_to_uint_map *map,
struct gl_uniform_storage *uniforms,
union gl_constant_value *values)
- : map(map), uniforms(uniforms), next_sampler(0), values(values)
+ : map(map), uniforms(uniforms), values(values)
{
- memset(this->targets, 0, sizeof(this->targets));
}
- void start_shader()
+ void start_shader(gl_shader_stage shader_type)
{
+ assert(shader_type < MESA_SHADER_STAGES);
+ this->shader_type = shader_type;
+
this->shader_samplers_used = 0;
this->shader_shadow_samplers = 0;
+ this->next_sampler = 0;
+ this->next_image = 0;
+ this->next_subroutine = 0;
+ this->record_array_count = 1;
+ memset(this->targets, 0, sizeof(this->targets));
}
- void set_and_process(struct gl_shader *shader,
+ void set_and_process(struct gl_shader_program *prog,
ir_variable *var)
{
- ubo_var = NULL;
- if (var->uniform_block != -1) {
- struct gl_uniform_block *block =
- &shader->UniformBlocks[var->uniform_block];
-
- ubo_block_index = var->uniform_block;
- ubo_var_index = var->location;
- ubo_var = &block->Uniforms[var->location];
- ubo_byte_offset = ubo_var->Offset;
- }
+ current_var = var;
+ field_counter = 0;
+ this->record_next_sampler = new string_to_uint_map;
+
+ ubo_block_index = -1;
+ if (var->is_in_buffer_block()) {
+ if (var->is_interface_instance() && var->type->is_array()) {
+ unsigned l = strlen(var->get_interface_type()->name);
+
+ for (unsigned i = 0; i < prog->NumBufferInterfaceBlocks; i++) {
+ if (strncmp(var->get_interface_type()->name,
+ prog->BufferInterfaceBlocks[i].Name,
+ l) == 0
+ && prog->BufferInterfaceBlocks[i].Name[l] == '[') {
+ ubo_block_index = i;
+ break;
+ }
+ }
+ } else {
+ for (unsigned i = 0; i < prog->NumBufferInterfaceBlocks; i++) {
+ if (strcmp(var->get_interface_type()->name,
+ prog->BufferInterfaceBlocks[i].Name) == 0) {
+ ubo_block_index = i;
+ break;
+ }
+ }
+ }
+ assert(ubo_block_index != -1);
+
+ /* Uniform blocks that were specified with an instance name must be
+ * handled a little bit differently. The name of the variable is the
+ * name used to reference the uniform block instead of being the name
+ * of a variable within the block. Therefore, searching for the name
+ * within the block will fail.
+ */
+ if (var->is_interface_instance()) {
+ ubo_byte_offset = 0;
+ process(var->get_interface_type(),
+ var->get_interface_type()->name);
+ } else {
+ const struct gl_uniform_block *const block =
+ &prog->BufferInterfaceBlocks[ubo_block_index];
+
+ assert(var->data.location != -1);
+
+ const struct gl_uniform_buffer_variable *const ubo_var =
+ &block->Uniforms[var->data.location];
+
+ ubo_byte_offset = ubo_var->Offset;
+ process(var);
+ }
+ } else {
+ /* Store any explicit location and reset data location so we can
+ * reuse this variable for storing the uniform slot number.
+ */
+ this->explicit_location = current_var->data.location;
+ current_var->data.location = -1;
- process(var);
+ process(var);
+ }
+ delete this->record_next_sampler;
}
- struct gl_uniform_buffer_variable *ubo_var;
int ubo_block_index;
- int ubo_var_index;
int ubo_byte_offset;
+ gl_shader_stage shader_type;
private:
- virtual void visit_field(const glsl_type *type, const char *name)
+ void handle_samplers(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform, const char *name)
{
- assert(!type->is_record());
- assert(!(type->is_array() && type->fields.array->is_record()));
+ if (base_type->is_sampler()) {
+ uniform->opaque[shader_type].active = true;
+
+ /* Handle multiple samplers inside struct arrays */
+ if (this->record_array_count > 1) {
+ unsigned inner_array_size = MAX2(1, uniform->array_elements);
+ char *name_copy = ralloc_strdup(NULL, name);
+
+ /* Remove all array subscripts from the sampler name */
+ char *str_start;
+ const char *str_end;
+ while((str_start = strchr(name_copy, '[')) &&
+ (str_end = strchr(name_copy, ']'))) {
+ memmove(str_start, str_end + 1, 1 + strlen(str_end));
+ }
+
+ unsigned index = 0;
+ if (this->record_next_sampler->get(index, name_copy)) {
+ /* In this case, we've already seen this uniform so we just use
+ * the next sampler index recorded the last time we visited.
+ */
+ uniform->opaque[shader_type].index = index;
+ index = inner_array_size + uniform->opaque[shader_type].index;
+ this->record_next_sampler->put(index, name_copy);
+
+ ralloc_free(name_copy);
+ /* Return as everything else has already been initialised in a
+ * previous pass.
+ */
+ return;
+ } else {
+ /* We've never seen this uniform before so we need to allocate
+ * enough indices to store it.
+ *
+ * Nested struct arrays behave like arrays of arrays so we need
+ * to increase the index by the total number of elements of the
+ * sampler in case there is more than one sampler inside the
+ * structs. This allows the offset to be easily calculated for
+ * indirect indexing.
+ */
+ uniform->opaque[shader_type].index = this->next_sampler;
+ this->next_sampler +=
+ inner_array_size * this->record_array_count;
+
+ /* Store the next index for future passes over the struct array
+ */
+ index = uniform->opaque[shader_type].index + inner_array_size;
+ this->record_next_sampler->put(index, name_copy);
+ ralloc_free(name_copy);
+ }
+ } else {
+ /* Increment the sampler by 1 for non-arrays and by the number of
+ * array elements for arrays.
+ */
+ uniform->opaque[shader_type].index = this->next_sampler;
+ this->next_sampler += MAX2(1, uniform->array_elements);
+ }
+
+ const gl_texture_index target = base_type->sampler_index();
+ const unsigned shadow = base_type->sampler_shadow;
+ for (unsigned i = uniform->opaque[shader_type].index;
+ i < MIN2(this->next_sampler, MAX_SAMPLERS);
+ i++) {
+ this->targets[i] = target;
+ this->shader_samplers_used |= 1U << i;
+ this->shader_shadow_samplers |= shadow << i;
+ }
+ }
+ }
- unsigned id;
- bool found = this->map->get(id, name);
- assert(found);
+ void handle_images(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform)
+ {
+ if (base_type->is_image()) {
+ uniform->opaque[shader_type].index = this->next_image;
+ uniform->opaque[shader_type].active = true;
- if (!found)
- return;
+ /* Increment the image index by 1 for non-arrays and by the
+ * number of array elements for arrays.
+ */
+ this->next_image += MAX2(1, uniform->array_elements);
- /* If there is already storage associated with this uniform, it means
- * that it was set while processing an earlier shader stage. For
- * example, we may be processing the uniform in the fragment shader, but
- * the uniform was already processed in the vertex shader.
- */
- if (this->uniforms[id].storage != NULL) {
- /* If the uniform already has storage set from another shader stage,
- * mark the samplers used for this shader stage.
- */
- if (type->contains_sampler()) {
- const unsigned count = MAX2(1, this->uniforms[id].array_elements);
- const unsigned shadow = (type->is_array())
- ? type->fields.array->sampler_shadow : type->sampler_shadow;
+ }
+ }
- for (unsigned i = 0; i < count; i++) {
- const unsigned s = this->uniforms[id].sampler + i;
+ void handle_subroutines(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform)
+ {
+ if (base_type->is_subroutine()) {
+ uniform->opaque[shader_type].index = this->next_subroutine;
+ uniform->opaque[shader_type].active = true;
- this->shader_samplers_used |= 1U << s;
- this->shader_shadow_samplers |= shadow << s;
- }
- }
+ /* Increment the subroutine index by 1 for non-arrays and by the
+ * number of array elements for arrays.
+ */
+ this->next_subroutine += MAX2(1, uniform->array_elements);
- return;
}
+ }
+
+ virtual void set_record_array_count(unsigned record_array_count)
+ {
+ this->record_array_count = record_array_count;
+ }
+
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major)
+ {
+ (void) type;
+ (void) name;
+ (void) row_major;
+ assert(!"Should not get here.");
+ }
+
+ virtual void enter_record(const glsl_type *type, const char *,
+ bool row_major, const unsigned packing) {
+ assert(type->is_record());
+ if (this->ubo_block_index == -1)
+ return;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std430_base_alignment(row_major));
+ else
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void leave_record(const glsl_type *type, const char *,
+ bool row_major, const unsigned packing) {
+ assert(type->is_record());
+ if (this->ubo_block_index == -1)
+ return;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std430_base_alignment(row_major));
+ else
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major, const glsl_type *record_type,
+ const unsigned packing,
+ bool /* last_field */)
+ {
+ assert(!type->without_array()->is_record());
+ assert(!type->without_array()->is_interface());
+ assert(!(type->is_array() && type->fields.array->is_array()));
+
+ unsigned id;
+ bool found = this->map->get(id, name);
+ assert(found);
+
+ if (!found)
+ return;
const glsl_type *base_type;
if (type->is_array()) {
base_type = type;
}
- if (base_type->is_sampler()) {
- this->uniforms[id].sampler = this->next_sampler;
+ /* Initialise opaque data */
+ this->uniforms[id].opaque[shader_type].index = ~0;
+ this->uniforms[id].opaque[shader_type].active = false;
- /* Increment the sampler by 1 for non-arrays and by the number of
- * array elements for arrays.
- */
- this->next_sampler += MAX2(1, this->uniforms[id].array_elements);
-
- const gl_texture_index target = base_type->sampler_index();
- const unsigned shadow = base_type->sampler_shadow;
- for (unsigned i = this->uniforms[id].sampler
- ; i < this->next_sampler
- ; i++) {
- this->targets[i] = target;
- this->shader_samplers_used |= 1U << i;
- this->shader_shadow_samplers |= shadow << i;
- }
+ /* This assigns uniform indices to sampler and image uniforms. */
+ handle_samplers(base_type, &this->uniforms[id], name);
+ handle_images(base_type, &this->uniforms[id]);
+ handle_subroutines(base_type, &this->uniforms[id]);
+
+ /* For array of arrays or struct arrays the base location may have
+ * already been set so don't set it again.
+ */
+ if (ubo_block_index == -1 && current_var->data.location == -1) {
+ current_var->data.location = id;
+ }
+ /* If there is already storage associated with this uniform or if the
+ * uniform is set as builtin, it means that it was set while processing
+ * an earlier shader stage. For example, we may be processing the
+ * uniform in the fragment shader, but the uniform was already processed
+ * in the vertex shader.
+ */
+ if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
+ return;
+ }
+
+ /* Assign explicit locations. */
+ if (current_var->data.explicit_location) {
+ /* Set sequential locations for struct fields. */
+ if (current_var->type->without_array()->is_record() ||
+ current_var->type->is_array_of_arrays()) {
+ const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
+ this->uniforms[id].remap_location =
+ this->explicit_location + field_counter;
+ field_counter += entries;
+ } else {
+ this->uniforms[id].remap_location = this->explicit_location;
+ }
} else {
- this->uniforms[id].sampler = ~0;
+      /* Initialize to indicate that no location is set */
+ this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
}
this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
this->uniforms[id].initialized = 0;
this->uniforms[id].num_driver_storage = 0;
this->uniforms[id].driver_storage = NULL;
- this->uniforms[id].storage = this->values;
- if (this->ubo_var) {
- this->uniforms[id].block_index = this->ubo_block_index;
-
- unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor);
- this->ubo_byte_offset = align(this->ubo_byte_offset, alignment);
- this->uniforms[id].offset = this->ubo_byte_offset;
- this->ubo_byte_offset += type->std140_size(ubo_var->RowMajor);
-
- if (type->is_array()) {
- this->uniforms[id].array_stride =
- align(type->fields.array->std140_size(ubo_var->RowMajor), 16);
+ this->uniforms[id].atomic_buffer_index = -1;
+ this->uniforms[id].hidden =
+ current_var->data.how_declared == ir_var_hidden;
+ this->uniforms[id].builtin = is_gl_identifier(name);
+
+ /* Do not assign storage if the uniform is builtin */
+ if (!this->uniforms[id].builtin)
+ this->uniforms[id].storage = this->values;
+
+ this->uniforms[id].is_shader_storage =
+ current_var->is_in_shader_storage_block();
+
+ if (this->ubo_block_index != -1) {
+ this->uniforms[id].block_index = this->ubo_block_index;
+
+ unsigned alignment = type->std140_base_alignment(row_major);
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ alignment = type->std430_base_alignment(row_major);
+ this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
+ this->uniforms[id].offset = this->ubo_byte_offset;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset += type->std430_size(row_major);
+ else
+ this->ubo_byte_offset += type->std140_size(row_major);
+
+ if (type->is_array()) {
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->uniforms[id].array_stride =
+ type->without_array()->std430_array_stride(row_major);
+ else
+ this->uniforms[id].array_stride =
+ glsl_align(type->without_array()->std140_size(row_major),
+ 16);
} else {
this->uniforms[id].array_stride = 0;
}
- if (type->is_matrix() ||
- (type->is_array() && type->fields.array->is_matrix())) {
- this->uniforms[id].matrix_stride = 16;
- this->uniforms[id].row_major = ubo_var->RowMajor;
+ if (type->without_array()->is_matrix()) {
+ const glsl_type *matrix = type->without_array();
+ const unsigned N = matrix->base_type == GLSL_TYPE_DOUBLE ? 8 : 4;
+ const unsigned items =
+ row_major ? matrix->matrix_columns : matrix->vector_elements;
+
+ assert(items <= 4);
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->uniforms[id].matrix_stride = items < 3 ? items * N :
+ glsl_align(items * N, 16);
+ else
+ this->uniforms[id].matrix_stride = glsl_align(items * N, 16);
+ this->uniforms[id].row_major = row_major;
} else {
this->uniforms[id].matrix_stride = 0;
this->uniforms[id].row_major = false;
struct gl_uniform_storage *uniforms;
unsigned next_sampler;
+ unsigned next_image;
+ unsigned next_subroutine;
+
+ /**
+ * Field counter is used to take care that uniform structures
+ * with explicit locations get sequential locations.
+ */
+ unsigned field_counter;
+
+ /**
+ * Current variable being processed.
+ */
+ ir_variable *current_var;
+
+ /* Used to store the explicit location from current_var so that we can
+ * reuse the location field for storing the uniform slot id.
+ */
+ int explicit_location;
+
+ /* Stores total struct array elements including nested structs */
+ unsigned record_array_count;
+
+ /* Map for temporarily storing next sampler index when handling samplers in
+ * struct arrays.
+ */
+ struct string_to_uint_map *record_next_sampler;
public:
union gl_constant_value *values;
{
for (unsigned int i = 0; i < *num_linked_blocks; i++) {
struct gl_uniform_block *old_block = &(*linked_blocks)[i];
- if (strcmp(old_block->Name, new_block->Name) == 0) {
- if (old_block->NumUniforms != new_block->NumUniforms) {
- return -1;
- }
-
- for (unsigned j = 0; j < old_block->NumUniforms; j++) {
- if (strcmp(old_block->Uniforms[j].Name,
- new_block->Uniforms[j].Name) != 0)
- return -1;
-
- if (old_block->Uniforms[j].Offset !=
- new_block->Uniforms[j].Offset)
- return -1;
- if (old_block->Uniforms[j].RowMajor !=
- new_block->Uniforms[j].RowMajor)
- return -1;
- }
- return i;
- }
+ if (strcmp(old_block->Name, new_block->Name) == 0)
+ return link_uniform_blocks_are_compatible(old_block, new_block)
+ ? i : -1;
}
*linked_blocks = reralloc(mem_ctx, *linked_blocks,
struct gl_uniform_buffer_variable *ubo_var =
&linked_block->Uniforms[i];
- ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
+ if (ubo_var->Name == ubo_var->IndexName) {
+ ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
+ ubo_var->IndexName = ubo_var->Name;
+ } else {
+ ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
+ ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName);
+ }
}
return linked_block_index;
* would point at the uniform block list in one of the pre-linked
* shaders).
*/
-static bool
+static void
link_update_uniform_buffer_variables(struct gl_shader *shader)
{
- foreach_list(node, shader->ir) {
- ir_variable *const var = ((ir_instruction *) node)->as_variable();
+ foreach_in_list(ir_instruction, node, shader->ir) {
+ ir_variable *const var = node->as_variable();
- if ((var == NULL) || (var->uniform_block == -1))
+ if ((var == NULL) || !var->is_in_buffer_block())
continue;
- assert(var->mode == ir_var_uniform);
+ assert(var->data.mode == ir_var_uniform ||
+ var->data.mode == ir_var_shader_storage);
+
+ if (var->is_interface_instance()) {
+ var->data.location = 0;
+ continue;
+ }
bool found = false;
- for (unsigned i = 0; i < shader->NumUniformBlocks; i++) {
- for (unsigned j = 0; j < shader->UniformBlocks[i].NumUniforms; j++) {
- if (!strcmp(var->name, shader->UniformBlocks[i].Uniforms[j].Name)) {
+ char sentinel = '\0';
+
+ if (var->type->is_record()) {
+ sentinel = '.';
+ } else if (var->type->is_array() && (var->type->fields.array->is_array()
+ || var->type->without_array()->is_record())) {
+ sentinel = '[';
+ }
+
+ const unsigned l = strlen(var->name);
+ for (unsigned i = 0; i < shader->NumBufferInterfaceBlocks; i++) {
+ for (unsigned j = 0; j < shader->BufferInterfaceBlocks[i].NumUniforms; j++) {
+ if (sentinel) {
+ const char *begin = shader->BufferInterfaceBlocks[i].Uniforms[j].Name;
+ const char *end = strchr(begin, sentinel);
+
+ if (end == NULL)
+ continue;
+
+ if ((ptrdiff_t) l != (end - begin))
+ continue;
+
+ if (strncmp(var->name, begin, l) == 0) {
+ found = true;
+ var->data.location = j;
+ break;
+ }
+ } else if (!strcmp(var->name,
+ shader->BufferInterfaceBlocks[i].Uniforms[j].Name)) {
found = true;
- var->uniform_block = i;
- var->location = j;
+ var->data.location = j;
break;
}
}
}
assert(found);
}
-
- return true;
}
-void
-link_assign_uniform_block_offsets(struct gl_shader *shader)
+static void
+link_set_image_access_qualifiers(struct gl_shader_program *prog,
+ gl_shader *sh, unsigned shader_stage,
+ ir_variable *var, const glsl_type *type,
+ char **name, size_t name_length)
{
- for (unsigned b = 0; b < shader->NumUniformBlocks; b++) {
- struct gl_uniform_block *block = &shader->UniformBlocks[b];
+ /* Handle arrays of arrays */
+ if (type->is_array() && type->fields.array->is_array()) {
+ for (unsigned i = 0; i < type->length; i++) {
+ size_t new_length = name_length;
- unsigned offset = 0;
- for (unsigned int i = 0; i < block->NumUniforms; i++) {
- struct gl_uniform_buffer_variable *ubo_var = &block->Uniforms[i];
- const struct glsl_type *type = ubo_var->Type;
+ /* Append the subscript to the current variable name */
+ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
- unsigned alignment = type->std140_base_alignment(ubo_var->RowMajor);
- unsigned size = type->std140_size(ubo_var->RowMajor);
+ link_set_image_access_qualifiers(prog, sh, shader_stage, var,
+ type->fields.array, name,
+ new_length);
+ }
+ } else {
+ unsigned id = 0;
+ bool found = prog->UniformHash->get(id, *name);
+ assert(found);
+ (void) found;
+ const gl_uniform_storage *storage = &prog->UniformStorage[id];
+ const unsigned index = storage->opaque[shader_stage].index;
+ const GLenum access = (var->data.image_read_only ? GL_READ_ONLY :
+ var->data.image_write_only ? GL_WRITE_ONLY :
+ GL_READ_WRITE);
+
+ for (unsigned j = 0; j < MAX2(1, storage->array_elements); ++j)
+ sh->ImageAccess[index + j] = access;
+ }
+}
+
+/**
+ * Combine the hidden uniform hash map with the uniform hash map so that the
+ * hidden uniforms will be given indicies at the end of the uniform storage
+ * array.
+ */
+static void
+assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
+ void *closure)
+{
+ count_uniform_size *uniform_size = (count_uniform_size *) closure;
+ unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
+ uniform_size->num_hidden_uniforms;
- offset = align(offset, alignment);
- ubo_var->Offset = offset;
- offset += size;
+ uniform_size->map->put(hidden_uniform_start + hidden_id, name);
+}
+
+/**
+ * Search UniformRemapTable for empty block big enough to hold given uniform.
+ * TODO Optimize this algorithm later if it turns out to be a major bottleneck.
+ */
+static int
+find_empty_block(struct gl_shader_program *prog,
+ struct gl_uniform_storage *uniform)
+{
+ const unsigned entries = MAX2(1, uniform->array_elements);
+ for (unsigned i = 0, j; i < prog->NumUniformRemapTable; i++) {
+ /* We found empty space in UniformRemapTable. */
+ if (prog->UniformRemapTable[i] == NULL) {
+ for (j = i; j < entries && j < prog->NumUniformRemapTable; j++) {
+ if (prog->UniformRemapTable[j] != NULL) {
+ /* Entries do not fit in this space, continue searching
+ * after this location.
+ */
+ i = j + 1;
+ break;
+ }
+ }
+ /* Entries fit, we can return this location. */
+ if (i != j + 1) {
+ return i;
+ }
}
- block->UniformBufferSize = offset;
}
+ return -1;
}
void
-link_assign_uniform_locations(struct gl_shader_program *prog)
+link_assign_uniform_locations(struct gl_shader_program *prog,
+ unsigned int boolean_true,
+ unsigned int max_locations)
{
ralloc_free(prog->UniformStorage);
prog->UniformStorage = NULL;
- prog->NumUserUniformStorage = 0;
+ prog->NumUniformStorage = 0;
if (prog->UniformHash != NULL) {
prog->UniformHash->clear();
prog->UniformHash = new string_to_uint_map;
}
- /* Uniforms that lack an initializer in the shader code have an initial
- * value of zero. This includes sampler uniforms.
- *
- * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
- *
- * "The link time initial value is either the value of the variable's
- * initializer, if present, or 0 if no initializer is present. Sampler
- * types cannot have initializers."
- */
- memset(prog->SamplerUnits, 0, sizeof(prog->SamplerUnits));
-
- for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
- if (prog->_LinkedShaders[i] == NULL)
- continue;
-
- if (!link_update_uniform_buffer_variables(prog->_LinkedShaders[i]))
- return;
- }
-
/* First pass: Count the uniform resources used by the user-defined
* uniforms. While this happens, each active uniform will have an index
* assigned to it.
* Note: this is *NOT* the index that is returned to the application by
* glGetUniformLocation.
*/
- count_uniform_size uniform_size(prog->UniformHash);
- for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
- if (prog->_LinkedShaders[i] == NULL)
+ struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
+ count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms);
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh == NULL)
continue;
+ /* Uniforms that lack an initializer in the shader code have an initial
+ * value of zero. This includes sampler uniforms.
+ *
+ * Page 24 (page 30 of the PDF) of the GLSL 1.20 spec says:
+ *
+ * "The link time initial value is either the value of the variable's
+ * initializer, if present, or 0 if no initializer is present. Sampler
+ * types cannot have initializers."
+ */
+ memset(sh->SamplerUnits, 0, sizeof(sh->SamplerUnits));
+ memset(sh->ImageUnits, 0, sizeof(sh->ImageUnits));
+
+ link_update_uniform_buffer_variables(sh);
+
/* Reset various per-shader target counts.
*/
uniform_size.start_shader();
- foreach_list(node, prog->_LinkedShaders[i]->ir) {
- ir_variable *const var = ((ir_instruction *) node)->as_variable();
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *const var = node->as_variable();
- if ((var == NULL) || (var->mode != ir_var_uniform))
- continue;
-
- /* FINISHME: Update code to process built-in uniforms!
- */
- if (strncmp("gl_", var->name, 3) == 0)
+ if ((var == NULL) || (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
continue;
uniform_size.process(var);
}
- prog->_LinkedShaders[i]->num_samplers = uniform_size.num_shader_samplers;
- prog->_LinkedShaders[i]->num_uniform_components =
- uniform_size.num_shader_uniform_components;
+ sh->num_samplers = uniform_size.num_shader_samplers;
+ sh->NumImages = uniform_size.num_shader_images;
+ sh->num_uniform_components = uniform_size.num_shader_uniform_components;
+ sh->num_combined_uniform_components = sh->num_uniform_components;
+
+ for (unsigned i = 0; i < sh->NumBufferInterfaceBlocks; i++) {
+ if (!sh->BufferInterfaceBlocks[i].IsShaderStorage) {
+ sh->num_combined_uniform_components +=
+ sh->BufferInterfaceBlocks[i].UniformBufferSize / 4;
+ }
+ }
}
- const unsigned num_user_uniforms = uniform_size.num_active_uniforms;
+ const unsigned num_uniforms = uniform_size.num_active_uniforms;
const unsigned num_data_slots = uniform_size.num_values;
+ const unsigned hidden_uniforms = uniform_size.num_hidden_uniforms;
+
+ /* assign hidden uniforms a slot id */
+ hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
+ delete hiddenUniforms;
/* On the outside chance that there were no uniforms, bail out.
*/
- if (num_user_uniforms == 0)
+ if (num_uniforms == 0)
return;
struct gl_uniform_storage *uniforms =
- rzalloc_array(prog, struct gl_uniform_storage, num_user_uniforms);
+ rzalloc_array(prog, struct gl_uniform_storage, num_uniforms);
union gl_constant_value *data =
rzalloc_array(uniforms, union gl_constant_value, num_data_slots);
#ifndef NDEBUG
parcel_out_uniform_storage parcel(prog->UniformHash, uniforms, data);
- for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ unsigned total_entries = 0;
+
+ /* Calculate amount of 'holes' left after explicit locations were
+ * reserved from UniformRemapTable.
+ */
+ unsigned empty_locs = 0;
+ for (unsigned i = 0; i < prog->NumUniformRemapTable; i++)
+ if (prog->UniformRemapTable[i] == NULL)
+ empty_locs++;
+
+ /* Add all the reserved explicit locations - empty locations in remap table. */
+ if (prog->NumUniformRemapTable)
+ total_entries = (prog->NumUniformRemapTable - 1) - empty_locs;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (prog->_LinkedShaders[i] == NULL)
continue;
- /* Reset various per-shader target counts.
- */
- parcel.start_shader();
-
- foreach_list(node, prog->_LinkedShaders[i]->ir) {
- ir_variable *const var = ((ir_instruction *) node)->as_variable();
+ parcel.start_shader((gl_shader_stage)i);
- if ((var == NULL) || (var->mode != ir_var_uniform))
- continue;
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
+ ir_variable *const var = node->as_variable();
- /* FINISHME: Update code to process built-in uniforms!
- */
- if (strncmp("gl_", var->name, 3) == 0)
+ if ((var == NULL) || (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
continue;
- parcel.set_and_process(prog->_LinkedShaders[i], var);
+ parcel.set_and_process(prog, var);
}
prog->_LinkedShaders[i]->active_samplers = parcel.shader_samplers_used;
prog->_LinkedShaders[i]->shadow_samplers = parcel.shader_shadow_samplers;
+
+ STATIC_ASSERT(sizeof(prog->_LinkedShaders[i]->SamplerTargets) ==
+ sizeof(parcel.targets));
+ memcpy(prog->_LinkedShaders[i]->SamplerTargets, parcel.targets,
+ sizeof(prog->_LinkedShaders[i]->SamplerTargets));
}
- assert(sizeof(prog->SamplerTargets) == sizeof(parcel.targets));
- memcpy(prog->SamplerTargets, parcel.targets, sizeof(prog->SamplerTargets));
+ /* Reserve all the explicit locations of the active uniforms. */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+ if (uniforms[i].type->is_subroutine() ||
+ uniforms[i].is_shader_storage)
+ continue;
+
+ if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC) {
+ /* How many new entries for this uniform? */
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ /* Set remap table entries point to correct gl_uniform_storage. */
+ for (unsigned j = 0; j < entries; j++) {
+ unsigned element_loc = uniforms[i].remap_location + j;
+ assert(prog->UniformRemapTable[element_loc] ==
+ INACTIVE_UNIFORM_EXPLICIT_LOCATION);
+ prog->UniformRemapTable[element_loc] = &uniforms[i];
+ }
+ }
+ }
+
+ /* Reserve locations for rest of the uniforms. */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+
+ if (uniforms[i].type->is_subroutine() ||
+ uniforms[i].is_shader_storage)
+ continue;
+
+ /* Built-in uniforms should not get any location. */
+ if (uniforms[i].builtin)
+ continue;
+
+ /* Explicit ones have been set already. */
+ if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ /* how many new entries for this uniform? */
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ /* Find UniformRemapTable for empty blocks where we can fit this uniform. */
+ int chosen_location = -1;
+
+ if (empty_locs)
+ chosen_location = find_empty_block(prog, &uniforms[i]);
+
+ if (chosen_location != -1) {
+ empty_locs -= entries;
+ } else {
+ chosen_location = prog->NumUniformRemapTable;
+
+ /* Add new entries to the total amount of entries. */
+ total_entries += entries;
+
+ /* resize remap table to fit new entries */
+ prog->UniformRemapTable =
+ reralloc(prog,
+ prog->UniformRemapTable,
+ gl_uniform_storage *,
+ prog->NumUniformRemapTable + entries);
+ prog->NumUniformRemapTable += entries;
+ }
+
+ /* set pointers for this uniform */
+ for (unsigned j = 0; j < entries; j++)
+ prog->UniformRemapTable[chosen_location + j] = &uniforms[i];
+
+ /* set the base location in remap table for the uniform */
+ uniforms[i].remap_location = chosen_location;
+ }
+
+ /* Verify that total amount of entries for explicit and implicit locations
+ * is less than MAX_UNIFORM_LOCATIONS.
+ */
+ if (total_entries >= max_locations) {
+ linker_error(prog, "count of uniform locations >= MAX_UNIFORM_LOCATIONS"
+ "(%u >= %u)", total_entries, max_locations);
+ }
+
+ /* Reserve all the explicit locations of the active subroutine uniforms. */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+ if (!uniforms[i].type->is_subroutine())
+ continue;
+
+ if (uniforms[i].remap_location == UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ struct gl_shader *sh = prog->_LinkedShaders[j];
+ if (!sh)
+ continue;
+
+ if (!uniforms[i].opaque[j].active)
+ continue;
+
+ /* How many new entries for this uniform? */
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ /* Set remap table entries point to correct gl_uniform_storage. */
+ for (unsigned k = 0; k < entries; k++) {
+ unsigned element_loc = uniforms[i].remap_location + k;
+ assert(sh->SubroutineUniformRemapTable[element_loc] ==
+ INACTIVE_UNIFORM_EXPLICIT_LOCATION);
+ sh->SubroutineUniformRemapTable[element_loc] = &uniforms[i];
+ }
+ }
+ }
+
+ /* reserve subroutine locations */
+ for (unsigned i = 0; i < num_uniforms; i++) {
+
+ if (!uniforms[i].type->is_subroutine())
+ continue;
+ const unsigned entries = MAX2(1, uniforms[i].array_elements);
+
+ if (uniforms[i].remap_location != UNMAPPED_UNIFORM_LOC)
+ continue;
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ struct gl_shader *sh = prog->_LinkedShaders[j];
+ if (!sh)
+ continue;
+
+ if (!uniforms[i].opaque[j].active)
+ continue;
+
+ sh->SubroutineUniformRemapTable =
+ reralloc(sh,
+ sh->SubroutineUniformRemapTable,
+ gl_uniform_storage *,
+ sh->NumSubroutineUniformRemapTable + entries);
+
+ for (unsigned k = 0; k < entries; k++)
+ sh->SubroutineUniformRemapTable[sh->NumSubroutineUniformRemapTable + k] = &uniforms[i];
+ uniforms[i].remap_location = sh->NumSubroutineUniformRemapTable;
+ sh->NumSubroutineUniformRemapTable += entries;
+ }
+ }
#ifndef NDEBUG
- for (unsigned i = 0; i < num_user_uniforms; i++) {
- assert(uniforms[i].storage != NULL);
+ for (unsigned i = 0; i < num_uniforms; i++) {
+ assert(uniforms[i].storage != NULL || uniforms[i].builtin);
}
assert(parcel.values == data_end);
#endif
- prog->NumUserUniformStorage = num_user_uniforms;
+ prog->NumUniformStorage = num_uniforms;
+ prog->NumHiddenUniforms = hidden_uniforms;
prog->UniformStorage = uniforms;
- link_set_uniform_initializers(prog);
+ /**
+ * Scan the program for image uniforms and store image unit access
+ * information into the gl_shader data structure.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ gl_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+
+ if (var && var->data.mode == ir_var_uniform &&
+ var->type->contains_image()) {
+ char *name_copy = ralloc_strdup(NULL, var->name);
+ link_set_image_access_qualifiers(prog, sh, i, var, var->type,
+ &name_copy, strlen(var->name));
+ ralloc_free(name_copy);
+ }
+ }
+ }
+
+ link_set_uniform_initializers(prog, boolean_true);
return;
}