}
/**
- * Accumulates the array of prog->BufferInterfaceBlocks and checks that all
- * definitons of blocks agree on their contents.
+ * Accumulates the array of buffer blocks and checks that all definitions of
+ * blocks agree on their contents.
*/
static bool
-interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
+interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
+ bool validate_ssbo)
{
- unsigned max_num_uniform_blocks = 0;
+ int *InterfaceBlockStageIndex[MESA_SHADER_STAGES];
+ struct gl_uniform_block *blks = NULL;
+ unsigned *num_blks = validate_ssbo ? &prog->NumShaderStorageBlocks :
+ &prog->NumUniformBlocks;
+
+ unsigned max_num_buffer_blocks = 0;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- if (prog->_LinkedShaders[i])
- max_num_uniform_blocks += prog->_LinkedShaders[i]->NumBufferInterfaceBlocks;
+ if (prog->_LinkedShaders[i]) {
+ if (validate_ssbo) {
+ max_num_buffer_blocks +=
+ prog->_LinkedShaders[i]->NumShaderStorageBlocks;
+ } else {
+ max_num_buffer_blocks +=
+ prog->_LinkedShaders[i]->NumUniformBlocks;
+ }
+ }
}
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
struct gl_shader *sh = prog->_LinkedShaders[i];
- prog->InterfaceBlockStageIndex[i] = ralloc_array(prog, int,
- max_num_uniform_blocks);
- for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
- prog->InterfaceBlockStageIndex[i][j] = -1;
+ InterfaceBlockStageIndex[i] = new int[max_num_buffer_blocks];
+ for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
+ InterfaceBlockStageIndex[i][j] = -1;
if (sh == NULL)
continue;
- for (unsigned int j = 0; j < sh->NumBufferInterfaceBlocks; j++) {
- int index = link_cross_validate_uniform_block(prog,
- &prog->BufferInterfaceBlocks,
- &prog->NumBufferInterfaceBlocks,
- &sh->BufferInterfaceBlocks[j]);
+ unsigned sh_num_blocks;
+ struct gl_uniform_block **sh_blks;
+ if (validate_ssbo) {
+ sh_num_blocks = prog->_LinkedShaders[i]->NumShaderStorageBlocks;
+ sh_blks = sh->ShaderStorageBlocks;
+ } else {
+ sh_num_blocks = prog->_LinkedShaders[i]->NumUniformBlocks;
+ sh_blks = sh->UniformBlocks;
+ }
- if (index == -1) {
- linker_error(prog, "uniform block `%s' has mismatching definitions\n",
- sh->BufferInterfaceBlocks[j].Name);
- return false;
- }
+ for (unsigned int j = 0; j < sh_num_blocks; j++) {
+ int index = link_cross_validate_uniform_block(prog, &blks, num_blks,
+ sh_blks[j]);
+
+ if (index == -1) {
+ linker_error(prog, "buffer block `%s' has mismatching "
+ "definitions\n", sh_blks[j]->Name);
+
+ for (unsigned k = 0; k <= i; k++) {
+ delete[] InterfaceBlockStageIndex[k];
+ }
+ return false;
+ }
- prog->InterfaceBlockStageIndex[i][index] = j;
+ InterfaceBlockStageIndex[i][index] = j;
}
}
+ /* Update per stage block pointers to point to the program list.
+ * FIXME: We should be able to free the per stage blocks here.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ for (unsigned j = 0; j < *num_blks; j++) {
+ int stage_index = InterfaceBlockStageIndex[i][j];
+
+ if (stage_index != -1) {
+ struct gl_shader *sh = prog->_LinkedShaders[i];
+
+ blks[j].stageref |= (1 << i);
+
+ struct gl_uniform_block **sh_blks = validate_ssbo ?
+ sh->ShaderStorageBlocks : sh->UniformBlocks;
+
+ sh_blks[stage_index] = &blks[j];
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ delete[] InterfaceBlockStageIndex[i];
+ }
+
+ if (validate_ssbo)
+ prog->ShaderStorageBlocks = blks;
+ else
+ prog->UniformBlocks = blks;
+
return true;
}
hash_table *unnamed_interfaces;
};
+/**
+ * Check for conflicting xfb_stride default qualifiers and store buffer stride
+ * for later use.
+ */
+static void
+link_xfb_stride_layout_qualifiers(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_shader *linked_shader,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
+ linked_shader->TransformFeedback.BufferStride[i] = 0;
+ }
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
+ if (shader->TransformFeedback.BufferStride[j]) {
+ if (linked_shader->TransformFeedback.BufferStride[j] != 0 &&
+ shader->TransformFeedback.BufferStride[j] != 0 &&
+ linked_shader->TransformFeedback.BufferStride[j] !=
+ shader->TransformFeedback.BufferStride[j]) {
+ linker_error(prog,
+ "intrastage shaders defined with conflicting "
+ "xfb_stride for buffer %d (%d and %d)\n", j,
+ linked_shader->TransformFeedback.BufferStride[j],
+ shader->TransformFeedback.BufferStride[j]);
+ return;
+ }
+
+ if (shader->TransformFeedback.BufferStride[j])
+ linked_shader->TransformFeedback.BufferStride[j] =
+ shader->TransformFeedback.BufferStride[j];
+ }
+ }
+ }
+
+ for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
+ if (linked_shader->TransformFeedback.BufferStride[j]) {
+ prog->TransformFeedback.BufferStride[j] =
+ linked_shader->TransformFeedback.BufferStride[j];
+
+ /* We will validate doubles at a later stage */
+ if (prog->TransformFeedback.BufferStride[j] % 4) {
+ linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
+ "multiple of 4 or if its applied to a type that is "
+ "or contains a double a multiple of 8.",
+ prog->TransformFeedback.BufferStride[j]);
+ return;
+ }
+
+ if (prog->TransformFeedback.BufferStride[j] / 4 >
+ ctx->Const.MaxTransformFeedbackInterleavedComponents) {
+ linker_error(prog,
+ "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
+ "limit has been exceeded.");
+ return;
+ }
+ }
+ }
+}
/**
* Performs the cross-validation of tessellation control shader vertices and
struct gl_shader **shader_list,
unsigned num_shaders)
{
- struct gl_uniform_block *uniform_blocks = NULL;
+ struct gl_uniform_block *ubo_blocks = NULL;
+ struct gl_uniform_block *ssbo_blocks = NULL;
+ unsigned num_ubo_blocks = 0;
+ unsigned num_ssbo_blocks = 0;
/* Check that global variables defined in multiple shaders are consistent.
*/
return NULL;
/* Link up uniform blocks defined within this stage. */
- const unsigned num_uniform_blocks =
- link_uniform_blocks(mem_ctx, ctx, prog, shader_list, num_shaders,
- &uniform_blocks);
+ link_uniform_blocks(mem_ctx, ctx, prog, shader_list, num_shaders,
+ &ubo_blocks, &num_ubo_blocks, &ssbo_blocks,
+ &num_ssbo_blocks);
+
if (!prog->LinkStatus)
return NULL;
linked->ir = new(linked) exec_list;
clone_ir_list(mem_ctx, linked->ir, main->ir);
- linked->BufferInterfaceBlocks = uniform_blocks;
- linked->NumBufferInterfaceBlocks = num_uniform_blocks;
- ralloc_steal(linked, linked->BufferInterfaceBlocks);
+ /* Copy ubo blocks to linked shader list */
+ linked->UniformBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
+ ralloc_steal(linked, ubo_blocks);
+ for (unsigned i = 0; i < num_ubo_blocks; i++) {
+ linked->UniformBlocks[i] = &ubo_blocks[i];
+ }
+ linked->NumUniformBlocks = num_ubo_blocks;
+
+ /* Copy ssbo blocks to linked shader list */
+ linked->ShaderStorageBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
+ ralloc_steal(linked, ssbo_blocks);
+ for (unsigned i = 0; i < num_ssbo_blocks; i++) {
+ linked->ShaderStorageBlocks[i] = &ssbo_blocks[i];
+ }
+ linked->NumShaderStorageBlocks = num_ssbo_blocks;
link_fs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
link_tcs_out_layout_qualifiers(prog, linked, shader_list, num_shaders);
link_tes_in_layout_qualifiers(prog, linked, shader_list, num_shaders);
link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
link_cs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
+ link_xfb_stride_layout_qualifiers(ctx, prog, linked, shader_list,
+ num_shaders);
populate_symbol_table(linked);
/* Reversed because we want a descending order sort below. */
return r->slots - l->slots;
}
- } to_assign[16];
+ } to_assign[32];
+ assert(max_index <= 32);
unsigned num_attr = 0;
return false;
}
- const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX ? true : false);
+ const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
/* If the variable is not a built-in and has a location statically
* assigned in the shader (presumably via a layout qualifier), make sure
continue;
}
+ if (num_attr >= max_index) {
+ linker_error(prog, "too many %s (max %u)",
+ target_index == MESA_SHADER_VERTEX ?
+ "vertex shader inputs" : "fragment shader outputs",
+ max_index);
+ return false;
+ }
to_assign[num_attr].slots = slots;
to_assign[num_attr].var = var;
num_attr++;
static void
check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
{
+ unsigned total_uniform_blocks = 0;
+ unsigned total_shader_storage_blocks = 0;
+
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
struct gl_shader *sh = prog->_LinkedShaders[i];
_mesa_shader_stage_to_string(i));
}
}
- }
- unsigned blocks[MESA_SHADER_STAGES] = {0};
- unsigned total_uniform_blocks = 0;
- unsigned shader_blocks[MESA_SHADER_STAGES] = {0};
- unsigned total_shader_storage_blocks = 0;
+ total_shader_storage_blocks += sh->NumShaderStorageBlocks;
+ total_uniform_blocks += sh->NumUniformBlocks;
- for (unsigned i = 0; i < prog->NumBufferInterfaceBlocks; i++) {
- /* Don't check SSBOs for Uniform Block Size */
- if (!prog->BufferInterfaceBlocks[i].IsShaderStorage &&
- prog->BufferInterfaceBlocks[i].UniformBufferSize > ctx->Const.MaxUniformBlockSize) {
- linker_error(prog, "Uniform block %s too big (%d/%d)\n",
- prog->BufferInterfaceBlocks[i].Name,
- prog->BufferInterfaceBlocks[i].UniformBufferSize,
- ctx->Const.MaxUniformBlockSize);
+ const unsigned max_uniform_blocks =
+ ctx->Const.Program[i].MaxUniformBlocks;
+ if (max_uniform_blocks < sh->NumUniformBlocks) {
+ linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
+ _mesa_shader_stage_to_string(i), sh->NumUniformBlocks,
+ max_uniform_blocks);
}
- if (prog->BufferInterfaceBlocks[i].IsShaderStorage &&
- prog->BufferInterfaceBlocks[i].UniformBufferSize > ctx->Const.MaxShaderStorageBlockSize) {
- linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
- prog->BufferInterfaceBlocks[i].Name,
- prog->BufferInterfaceBlocks[i].UniformBufferSize,
- ctx->Const.MaxShaderStorageBlockSize);
+ const unsigned max_shader_storage_blocks =
+ ctx->Const.Program[i].MaxShaderStorageBlocks;
+ if (max_shader_storage_blocks < sh->NumShaderStorageBlocks) {
+ linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
+ _mesa_shader_stage_to_string(i),
+ sh->NumShaderStorageBlocks, max_shader_storage_blocks);
}
+ }
- for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
- if (prog->InterfaceBlockStageIndex[j][i] != -1) {
- struct gl_shader *sh = prog->_LinkedShaders[j];
- int stage_index = prog->InterfaceBlockStageIndex[j][i];
- if (sh && sh->BufferInterfaceBlocks[stage_index].IsShaderStorage) {
- shader_blocks[j]++;
- total_shader_storage_blocks++;
- } else {
- blocks[j]++;
- total_uniform_blocks++;
- }
- }
- }
+ if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
+ linker_error(prog, "Too many combined uniform blocks (%d/%d)\n",
+ total_uniform_blocks, ctx->Const.MaxCombinedUniformBlocks);
+ }
- if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
- linker_error(prog, "Too many combined uniform blocks (%d/%d)\n",
- total_uniform_blocks,
- ctx->Const.MaxCombinedUniformBlocks);
- } else {
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- const unsigned max_uniform_blocks =
- ctx->Const.Program[i].MaxUniformBlocks;
- if (blocks[i] > max_uniform_blocks) {
- linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
- _mesa_shader_stage_to_string(i),
- blocks[i],
- max_uniform_blocks);
- break;
- }
- }
+ if (total_shader_storage_blocks > ctx->Const.MaxCombinedShaderStorageBlocks) {
+ linker_error(prog, "Too many combined shader storage blocks (%d/%d)\n",
+ total_shader_storage_blocks,
+ ctx->Const.MaxCombinedShaderStorageBlocks);
+ }
+
+ for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
+ if (prog->UniformBlocks[i].UniformBufferSize >
+ ctx->Const.MaxUniformBlockSize) {
+ linker_error(prog, "Uniform block %s too big (%d/%d)\n",
+ prog->UniformBlocks[i].Name,
+ prog->UniformBlocks[i].UniformBufferSize,
+ ctx->Const.MaxUniformBlockSize);
}
+ }
- if (total_shader_storage_blocks > ctx->Const.MaxCombinedShaderStorageBlocks) {
- linker_error(prog, "Too many combined shader storage blocks (%d/%d)\n",
- total_shader_storage_blocks,
- ctx->Const.MaxCombinedShaderStorageBlocks);
- } else {
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- const unsigned max_shader_storage_blocks =
- ctx->Const.Program[i].MaxShaderStorageBlocks;
- if (shader_blocks[i] > max_shader_storage_blocks) {
- linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
- _mesa_shader_stage_to_string(i),
- shader_blocks[i],
- max_shader_storage_blocks);
- break;
- }
- }
+ for (unsigned i = 0; i < prog->NumShaderStorageBlocks; i++) {
+ if (prog->ShaderStorageBlocks[i].UniformBufferSize >
+ ctx->Const.MaxShaderStorageBlockSize) {
+ linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
+ prog->ShaderStorageBlocks[i].Name,
+ prog->ShaderStorageBlocks[i].UniformBufferSize,
+ ctx->Const.MaxShaderStorageBlockSize);
}
}
}
ctx->Const.Program[i].MaxImageUniforms);
total_image_units += sh->NumImages;
-
- for (unsigned j = 0; j < prog->NumBufferInterfaceBlocks; j++) {
- int stage_index = prog->InterfaceBlockStageIndex[i][j];
- if (stage_index != -1 && sh->BufferInterfaceBlocks[stage_index].IsShaderStorage)
- total_shader_storage_blocks++;
- }
+ total_shader_storage_blocks += sh->NumShaderStorageBlocks;
if (i == MESA_SHADER_FRAGMENT) {
foreach_in_list(ir_instruction, node, sh->ir) {
* for a variable, checks for overlaps between other uniforms using explicit
* locations.
*/
-static bool
+static int
reserve_explicit_locations(struct gl_shader_program *prog,
string_to_uint_map *map, ir_variable *var)
{
unsigned slots = var->type->uniform_locations();
unsigned max_loc = var->data.location + slots - 1;
+ unsigned return_value = slots;
/* Resize remap table if locations do not fit in the current one. */
if (max_loc + 1 > prog->NumUniformRemapTable) {
if (!prog->UniformRemapTable) {
linker_error(prog, "Out of memory during linking.\n");
- return false;
+ return -1;
}
/* Initialize allocated space. */
/* Possibly same uniform from a different stage, this is ok. */
unsigned hash_loc;
- if (map->get(hash_loc, var->name) && hash_loc == loc - i)
- continue;
+ if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
+ return_value = 0;
+ continue;
+ }
/* ARB_explicit_uniform_location specification states:
*
"location qualifier for uniform %s overlaps "
"previously used location\n",
var->name);
- return false;
+ return -1;
}
/* Initialize location as inactive before optimization
/* Note, base location used for arrays. */
map->put(var->data.location, var->name);
- return true;
+ return return_value;
}
static bool
* any optimizations happen to handle also inactive uniforms and
* inactive array elements that may get trimmed away.
*/
-static void
+static unsigned
check_explicit_uniform_locations(struct gl_context *ctx,
struct gl_shader_program *prog)
{
if (!ctx->Extensions.ARB_explicit_uniform_location)
- return;
+ return 0;
/* This map is used to detect if overlapping explicit locations
* occur with the same uniform (from different stage) or a different one.
if (!uniform_map) {
linker_error(prog, "Out of memory during linking.\n");
- return;
+ return 0;
}
unsigned entries_total = 0;
if (!var || var->data.mode != ir_var_uniform)
continue;
- entries_total += var->type->uniform_locations();
-
if (var->data.explicit_location) {
- bool ret;
+ bool ret = false;
if (var->type->without_array()->is_subroutine())
ret = reserve_subroutine_explicit_locations(prog, sh, var);
- else
- ret = reserve_explicit_locations(prog, uniform_map, var);
+ else {
+ int slots = reserve_explicit_locations(prog, uniform_map,
+ var);
+ if (slots != -1) {
+ ret = true;
+ entries_total += slots;
+ }
+ }
if (!ret) {
delete uniform_map;
- return;
+ return 0;
}
}
}
}
- /* Verify that total amount of entries for explicit and implicit locations
- * is less than MAX_UNIFORM_LOCATIONS.
- */
- if (entries_total >= ctx->Const.MaxUserAssignableUniformLocations) {
- linker_error(prog, "count of uniform locations >= MAX_UNIFORM_LOCATIONS"
- "(%u >= %u)", entries_total,
- ctx->Const.MaxUserAssignableUniformLocations);
+ struct empty_uniform_block *current_block = NULL;
+
+ for (unsigned i = 0; i < prog->NumUniformRemapTable; i++) {
+ /* We found empty space in UniformRemapTable. */
+ if (prog->UniformRemapTable[i] == NULL) {
+         /* We've found the beginning of a new continuous block of empty slots */
+ if (!current_block || current_block->start + current_block->slots != i) {
+ current_block = rzalloc(prog, struct empty_uniform_block);
+ current_block->start = i;
+ exec_list_push_tail(&prog->EmptyUniformLocations,
+                                &current_block->link);
+ }
+
+ /* The current block continues, so we simply increment its slots */
+ current_block->slots++;
+ }
}
+
delete uniform_map;
+ return entries_total;
}
static bool
if (type != GL_BUFFER_VARIABLE)
return true;
- for (unsigned i = 0; i < shProg->NumBufferInterfaceBlocks; i++) {
- const char *block_name = shProg->BufferInterfaceBlocks[i].Name;
+ for (unsigned i = 0; i < shProg->NumShaderStorageBlocks; i++) {
+ const char *block_name = shProg->ShaderStorageBlocks[i].Name;
block_name_len = strlen(block_name);
const char *block_square_bracket = strchr(block_name, '[');
* Create gl_shader_variable from ir_variable class.
*/
static gl_shader_variable *
-create_shader_variable(struct gl_shader_program *shProg, const ir_variable *in)
+create_shader_variable(struct gl_shader_program *shProg,
+ const ir_variable *in,
+ const char *name, const glsl_type *type,
+ bool use_implicit_location, int location)
{
gl_shader_variable *out = ralloc(shProg, struct gl_shader_variable);
if (!out)
return NULL;
- out->type = in->type;
- out->name = ralloc_strdup(shProg, in->name);
+   /* gl_VertexID may be lowered to gl_VertexIDMESA, but applications
+    * expect to see gl_VertexID in the program resource list. Pretend.
+    */
+ if (in->data.mode == ir_var_system_value &&
+ in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
+ out->name = ralloc_strdup(shProg, "gl_VertexID");
+ } else {
+ out->name = ralloc_strdup(shProg, name);
+ }
if (!out->name)
return NULL;
- out->location = in->data.location;
+ /* From the ARB_program_interface_query specification:
+ *
+ * "Not all active variables are assigned valid locations; the
+ * following variables will have an effective location of -1:
+ *
+ * * uniforms declared as atomic counters;
+ *
+ * * members of a uniform block;
+ *
+ * * built-in inputs, outputs, and uniforms (starting with "gl_"); and
+ *
+ * * inputs or outputs not declared with a "location" layout qualifier,
+ * except for vertex shader inputs and fragment shader outputs."
+ */
+ if (in->type->base_type == GLSL_TYPE_ATOMIC_UINT ||
+ is_gl_identifier(in->name) ||
+ !(in->data.explicit_location || use_implicit_location)) {
+ out->location = -1;
+ } else {
+ out->location = location;
+ }
+
+ out->type = type;
out->index = in->data.index;
out->patch = in->data.patch;
out->mode = in->data.mode;
return out;
}
+static bool
+add_shader_variable(struct gl_shader_program *shProg, unsigned stage_mask,
+ GLenum programInterface, ir_variable *var,
+ const char *name, const glsl_type *type,
+ bool use_implicit_location, int location)
+{
+ const bool is_vertex_input =
+ programInterface == GL_PROGRAM_INPUT &&
+ stage_mask == MESA_SHADER_VERTEX;
+
+ switch (type->base_type) {
+ case GLSL_TYPE_STRUCT: {
+ /* From the ARB_program_interface_query specification:
+ *
+ * "For an active variable declared as a structure, a separate entry
+ * will be generated for each active structure member. The name of
+ * each entry is formed by concatenating the name of the structure,
+ * the "." character, and the name of the structure member. If a
+ * structure member to enumerate is itself a structure or array, these
+ * enumeration rules are applied recursively."
+ */
+ unsigned field_location = location;
+ for (unsigned i = 0; i < type->length; i++) {
+ const struct glsl_struct_field *field = &type->fields.structure[i];
+ char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
+ if (!add_shader_variable(shProg, stage_mask, programInterface,
+ var, field_name, field->type,
+ use_implicit_location, field_location))
+ return false;
+
+ field_location +=
+ field->type->count_attribute_slots(is_vertex_input);
+ }
+ return true;
+ }
+
+ default: {
+ /* From the ARB_program_interface_query specification:
+ *
+ * "For an active variable declared as a single instance of a basic
+ * type, a single entry will be generated, using the variable name
+ * from the shader source."
+ */
+ gl_shader_variable *sha_v =
+ create_shader_variable(shProg, var, name, type,
+ use_implicit_location, location);
+ if (!sha_v)
+ return false;
+
+ return add_program_resource(shProg, programInterface, sha_v, stage_mask);
+ }
+ }
+}
+
static bool
add_interface_variables(struct gl_shader_program *shProg,
- exec_list *ir, GLenum programInterface)
+ unsigned stage, GLenum programInterface)
{
+ exec_list *ir = shProg->_LinkedShaders[stage]->ir;
+
foreach_in_list(ir_instruction, node, ir) {
ir_variable *var = node->as_variable();
- uint8_t mask = 0;
- if (!var)
+ if (!var || var->data.how_declared == ir_var_hidden)
continue;
+ int loc_bias;
+
switch (var->data.mode) {
- /* From GL 4.3 core spec, section 11.1.1 (Vertex Attributes):
- * "For GetActiveAttrib, all active vertex shader input variables
- * are enumerated, including the special built-in inputs gl_VertexID
- * and gl_InstanceID."
- */
case ir_var_system_value:
- if (var->data.location != SYSTEM_VALUE_VERTEX_ID &&
- var->data.location != SYSTEM_VALUE_VERTEX_ID_ZERO_BASE &&
- var->data.location != SYSTEM_VALUE_INSTANCE_ID)
- continue;
- /* Mark special built-in inputs referenced by the vertex stage so
- * that they are considered active by the shader queries.
- */
- mask = (1 << (MESA_SHADER_VERTEX));
- /* FALLTHROUGH */
case ir_var_shader_in:
if (programInterface != GL_PROGRAM_INPUT)
continue;
+ loc_bias = (stage == MESA_SHADER_VERTEX) ? int(VERT_ATTRIB_GENERIC0)
+ : int(VARYING_SLOT_VAR0);
break;
case ir_var_shader_out:
if (programInterface != GL_PROGRAM_OUTPUT)
continue;
+ loc_bias = (stage == MESA_SHADER_FRAGMENT) ? int(FRAG_RESULT_DATA0)
+ : int(VARYING_SLOT_VAR0);
break;
default:
continue;
if (strncmp(var->name, "gl_out_FragData", 15) == 0)
continue;
- gl_shader_variable *sha_v = create_shader_variable(shProg, var);
- if (!sha_v)
- return false;
+ const bool vs_input_or_fs_output =
+ (stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
+ (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
- if (!add_program_resource(shProg, programInterface, sha_v,
- build_stageref(shProg, sha_v->name,
- sha_v->mode) | mask))
+ if (!add_shader_variable(shProg, 1 << stage, programInterface,
+ var, var->name, var->type, vs_input_or_fs_output,
+ var->data.location - loc_bias))
return false;
}
return true;
}
if (type == iface) {
- gl_shader_variable *sha_v = create_shader_variable(shProg, var);
- if (!sha_v)
- return false;
- if (!add_program_resource(shProg, iface, sha_v,
- build_stageref(shProg, sha_v->name,
- sha_v->mode)))
+ const int stage_mask =
+ build_stageref(shProg, var->name, var->data.mode);
+ if (!add_shader_variable(shProg, stage_mask,
+ iface, var, var->name, var->type, false,
+ var->data.location - VARYING_SLOT_VAR0))
return false;
}
}
ir_variable *var = node->as_variable();
if (var) {
assert(var->data.mode == ir_var_shader_out);
- gl_shader_variable *sha_v = create_shader_variable(shProg, var);
- if (!sha_v)
- return false;
- if (!add_program_resource(shProg, GL_PROGRAM_OUTPUT, sha_v,
- 1 << MESA_SHADER_FRAGMENT))
+
+ if (!add_shader_variable(shProg,
+ 1 << MESA_SHADER_FRAGMENT,
+ GL_PROGRAM_OUTPUT, var, var->name, var->type,
+ true, var->data.location - FRAG_RESULT_DATA0))
return false;
}
}
int array_stride = -1;
char *var_name = get_top_level_name(uni->name);
char *interface_name =
- get_top_level_name(shProg->BufferInterfaceBlocks[block_index].Name);
+ get_top_level_name(uni->is_shader_storage ?
+ shProg->ShaderStorageBlocks[block_index].Name :
+ shProg->UniformBlocks[block_index].Name);
if (strcmp(var_name, interface_name) == 0) {
/* Deal with instanced array of SSBOs */
* resource data.
*/
void
-build_program_resource_list(struct gl_shader_program *shProg)
+build_program_resource_list(struct gl_context *ctx,
+ struct gl_shader_program *shProg)
{
/* Rebuild resource list. */
if (shProg->ProgramResourceList) {
return;
/* Add inputs and outputs to the resource list. */
- if (!add_interface_variables(shProg, shProg->_LinkedShaders[input_stage]->ir,
- GL_PROGRAM_INPUT))
+ if (!add_interface_variables(shProg, input_stage, GL_PROGRAM_INPUT))
return;
- if (!add_interface_variables(shProg, shProg->_LinkedShaders[output_stage]->ir,
- GL_PROGRAM_OUTPUT))
+ if (!add_interface_variables(shProg, output_stage, GL_PROGRAM_OUTPUT))
return;
/* Add transform feedback varyings. */
}
}
+ /* Add transform feedback buffers. */
+ for (unsigned i = 0; i < ctx->Const.MaxTransformFeedbackBuffers; i++) {
+ if ((shProg->LinkedTransformFeedback.ActiveBuffers >> i) & 1) {
+ shProg->LinkedTransformFeedback.Buffers[i].Binding = i;
+ if (!add_program_resource(shProg, GL_TRANSFORM_FEEDBACK_BUFFER,
+ &shProg->LinkedTransformFeedback.Buffers[i],
+ 0))
+ return;
+ }
+ }
+
/* Add uniforms from uniform storage. */
for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
/* Do not add uniforms internally used by Mesa. */
ir_var_uniform);
/* Add stagereferences for uniforms in a uniform block. */
+ bool is_shader_storage = shProg->UniformStorage[i].is_shader_storage;
int block_index = shProg->UniformStorage[i].block_index;
if (block_index != -1) {
- for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
- if (shProg->InterfaceBlockStageIndex[j][block_index] != -1)
- stageref |= (1 << j);
- }
+ stageref |= is_shader_storage ?
+ shProg->ShaderStorageBlocks[block_index].stageref :
+ shProg->UniformBlocks[block_index].stageref;
}
- bool is_shader_storage = shProg->UniformStorage[i].is_shader_storage;
GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
if (!should_add_buffer_variable(shProg, type,
shProg->UniformStorage[i].name))
return;
}
- /* Add program uniform blocks and shader storage blocks. */
- for (unsigned i = 0; i < shProg->NumBufferInterfaceBlocks; i++) {
- bool is_shader_storage = shProg->BufferInterfaceBlocks[i].IsShaderStorage;
- GLenum type = is_shader_storage ? GL_SHADER_STORAGE_BLOCK : GL_UNIFORM_BLOCK;
- if (!add_program_resource(shProg, type,
- &shProg->BufferInterfaceBlocks[i], 0))
+ /* Add program uniform blocks. */
+ for (unsigned i = 0; i < shProg->NumUniformBlocks; i++) {
+ if (!add_program_resource(shProg, GL_UNIFORM_BLOCK,
+ &shProg->UniformBlocks[i], 0))
+ return;
+ }
+
+ /* Add program shader storage blocks. */
+ for (unsigned i = 0; i < shProg->NumShaderStorageBlocks; i++) {
+ if (!add_program_resource(shProg, GL_SHADER_STORAGE_BLOCK,
+ &shProg->ShaderStorageBlocks[i], 0))
return;
}
}
}
-static void
-split_ubos_and_ssbos(void *mem_ctx,
- struct gl_uniform_block *blocks,
- unsigned num_blocks,
- struct gl_uniform_block ***ubos,
- unsigned *num_ubos,
- unsigned **ubo_interface_block_indices,
- struct gl_uniform_block ***ssbos,
- unsigned *num_ssbos,
- unsigned **ssbo_interface_block_indices)
-{
- unsigned num_ubo_blocks = 0;
- unsigned num_ssbo_blocks = 0;
-
- for (unsigned i = 0; i < num_blocks; i++) {
- if (blocks[i].IsShaderStorage)
- num_ssbo_blocks++;
- else
- num_ubo_blocks++;
- }
-
- *ubos = ralloc_array(mem_ctx, gl_uniform_block *, num_ubo_blocks);
- *num_ubos = 0;
-
- *ssbos = ralloc_array(mem_ctx, gl_uniform_block *, num_ssbo_blocks);
- *num_ssbos = 0;
-
- if (ubo_interface_block_indices)
- *ubo_interface_block_indices =
- ralloc_array(mem_ctx, unsigned, num_ubo_blocks);
-
- if (ssbo_interface_block_indices)
- *ssbo_interface_block_indices =
- ralloc_array(mem_ctx, unsigned, num_ssbo_blocks);
-
- for (unsigned i = 0; i < num_blocks; i++) {
- if (blocks[i].IsShaderStorage) {
- (*ssbos)[*num_ssbos] = &blocks[i];
- if (ssbo_interface_block_indices)
- (*ssbo_interface_block_indices)[*num_ssbos] = i;
- (*num_ssbos)++;
- } else {
- (*ubos)[*num_ubos] = &blocks[i];
- if (ubo_interface_block_indices)
- (*ubo_interface_block_indices)[*num_ubos] = i;
- (*num_ubos)++;
- }
- }
-
- assert(*num_ubos + *num_ssbos == num_blocks);
-}
-
static void
set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
{
return;
}
+ unsigned num_tfeedback_decls = 0;
+ unsigned int num_explicit_uniform_locs = 0;
+ bool has_xfb_qualifiers = false;
+ char **varying_names = NULL;
tfeedback_decl *tfeedback_decls = NULL;
- unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;
void *mem_ctx = ralloc_context(NULL); // temporary linker context
last = i;
}
- check_explicit_uniform_locations(ctx, prog);
+ num_explicit_uniform_locs = check_explicit_uniform_locations(ctx, prog);
link_assign_subroutine_types(prog);
if (!prog->LinkStatus)
if (prog->SeparateShader)
disable_varying_optimizations_for_sso(prog);
- if (!interstage_cross_validate_uniform_blocks(prog))
+ /* Process UBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, false))
+ goto done;
+
+ /* Process SSBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, true))
goto done;
/* Do common optimization before assigning storage for attributes,
goto done;
}
+ /* From the ARB_enhanced_layouts spec:
+ *
+ * "If the shader used to record output variables for transform feedback
+ * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
+ * qualifiers, the values specified by TransformFeedbackVaryings are
+ * ignored, and the set of variables captured for transform feedback is
+ * instead derived from the specified layout qualifiers."
+ */
+ for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
+ /* Find last stage before fragment shader */
+ if (prog->_LinkedShaders[i]) {
+ has_xfb_qualifiers =
+ process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
+ &num_tfeedback_decls,
+ &varying_names);
+ break;
+ }
+ }
+
+ if (!has_xfb_qualifiers) {
+ num_tfeedback_decls = prog->TransformFeedback.NumVarying;
+ varying_names = prog->TransformFeedback.VaryingNames;
+ }
+
if (num_tfeedback_decls != 0) {
/* From GL_EXT_transform_feedback:
* A program will fail to link if:
* non-zero, but the program object has no vertex or geometry
* shader;
*/
- if (first == MESA_SHADER_FRAGMENT) {
+ if (first >= MESA_SHADER_FRAGMENT) {
linker_error(prog, "Transform feedback varyings specified, but "
- "no vertex or geometry shader is present.\n");
+ "no vertex, tessellation, or geometry shader is "
+ "present.\n");
goto done;
}
tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
- prog->TransformFeedback.NumVarying);
+ num_tfeedback_decls);
if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
- prog->TransformFeedback.VaryingNames,
- tfeedback_decls))
+ varying_names, tfeedback_decls))
goto done;
}
}
}
- if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
+ if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
+ has_xfb_qualifiers))
goto done;
update_array_sizes(prog);
- link_assign_uniform_locations(prog, ctx->Const.UniformBooleanTrue);
+ link_assign_uniform_locations(prog, ctx->Const.UniformBooleanTrue,
+ num_explicit_uniform_locs,
+ ctx->Const.MaxUserAssignableUniformLocations);
link_assign_atomic_counter_resources(ctx, prog);
store_fragdepth_layout(prog);
}
}
- /* Split BufferInterfaceBlocks into UniformBlocks and ShaderStorageBlocks
- * for gl_shader_program and gl_shader, so that drivers that need separate
- * index spaces for each set can have that.
- */
- for (unsigned i = MESA_SHADER_VERTEX; i < MESA_SHADER_STAGES; i++) {
- if (prog->_LinkedShaders[i] != NULL) {
- gl_shader *sh = prog->_LinkedShaders[i];
- split_ubos_and_ssbos(sh,
- sh->BufferInterfaceBlocks,
- sh->NumBufferInterfaceBlocks,
- &sh->UniformBlocks,
- &sh->NumUniformBlocks,
- NULL,
- &sh->ShaderStorageBlocks,
- &sh->NumShaderStorageBlocks,
- NULL);
- }
- }
-
- split_ubos_and_ssbos(prog,
- prog->BufferInterfaceBlocks,
- prog->NumBufferInterfaceBlocks,
- &prog->UniformBlocks,
- &prog->NumUniformBlocks,
- &prog->UboInterfaceBlockIndex,
- &prog->ShaderStorageBlocks,
- &prog->NumShaderStorageBlocks,
- &prog->SsboInterfaceBlockIndex);
-
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (prog->_LinkedShaders[i] == NULL)
continue;