#include "ir.h"
#include "program.h"
#include "program/prog_instruction.h"
+#include "program/program.h"
#include "util/set.h"
#include "util/string_to_uint_map.h"
#include "linker.h"
};
-class array_resize_visitor : public ir_hierarchical_visitor {
+/**
+ * A visitor helper that provides methods for updating the types of
+ * ir_dereferences. Classes that update variable types (say, updating
+ * array sizes) will want to use this so that dereference types stay in sync.
+ */
+class deref_type_updater : public ir_hierarchical_visitor {
+public:
+   /* A variable dereference simply takes on the (possibly updated) type
+    * of the variable it references.
+    */
+   virtual ir_visitor_status visit(ir_dereference_variable *ir)
+   {
+      ir->type = ir->var->type;
+      return visit_continue;
+   }
+
+   /* An array dereference yields the element type of the array being
+    * indexed.  Handled in visit_leave so the inner array expression's
+    * type has already been updated.
+    */
+   virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
+   {
+      const glsl_type *const vt = ir->array->type;
+      if (vt->is_array())
+         ir->type = vt->fields.array;
+      return visit_continue;
+   }
+
+   /* Re-resolve a record member's type by looking the field name up
+    * again in the (possibly updated) struct type.
+    */
+   virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
+   {
+      for (unsigned i = 0; i < ir->record->type->length; i++) {
+         const struct glsl_struct_field *field =
+            &ir->record->type->fields.structure[i];
+         if (strcmp(field->name, ir->field) == 0) {
+            ir->type = field->type;
+            break;
+         }
+      }
+      return visit_continue;
+   }
+};
+
+
+class array_resize_visitor : public deref_type_updater {
public:
unsigned num_vertices;
gl_shader_program *prog;
return visit_continue;
}
-
- /* Dereferences of input variables need to be updated so that their type
- * matches the newly assigned type of the variable they are accessing. */
- virtual ir_visitor_status visit(ir_dereference_variable *ir)
- {
- ir->type = ir->var->type;
- return visit_continue;
- }
-
- /* Dereferences of 2D input arrays need to be updated so that their type
- * matches the newly assigned type of the array they are accessing. */
- virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
- {
- const glsl_type *const vt = ir->array->type;
- if (vt->is_array())
- ir->type = vt->fields.array;
- return visit_continue;
- }
};
/**
{
va_list ap;
- ralloc_strcat(&prog->InfoLog, "error: ");
+ ralloc_strcat(&prog->data->InfoLog, "error: ");
va_start(ap, fmt);
- ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
+ ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
va_end(ap);
- prog->LinkStatus = false;
+ prog->data->LinkStatus = false;
}
{
va_list ap;
- ralloc_strcat(&prog->InfoLog, "warning: ");
+ ralloc_strcat(&prog->data->InfoLog, "warning: ");
va_start(ap, fmt);
- ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
+ ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
va_end(ap);
}
*clip_distance_array_size = 0;
*cull_distance_array_size = 0;
- if (prog->Version >= (prog->IsES ? 300 : 130)) {
+ if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
/* From section 7.1 (Vertex Shader Special Variables) of the
* GLSL 1.30 spec:
*
* All GLSL ES Versions are similar to GLSL 1.40--failing to write to
* gl_Position is not an error.
*/
- if (prog->Version < (prog->IsES ? 300 : 140)) {
+ if (prog->data->Version < (prog->IsES ? 300 : 140)) {
find_assignment_visitor find("gl_Position");
find.run(shader->ir);
if (!find.variable_found()) {
return;
}
- if (prog->IsES && existing->data.precision != var->data.precision) {
+ /* Only in GLSL ES 3.10, the precision qualifier should not match
+ * between block members defined in matched block names within a
+ * shader interface.
+ *
+ * In GLSL ES 3.00 and ES 3.20, precision qualifier for each block
+ * member should match.
+ */
+ if (prog->IsES && (prog->data->Version != 310 ||
+ !var->get_interface_type()) &&
+ existing->data.precision != var->data.precision) {
linker_error(prog, "declarations for %s `%s` have "
"mismatching precision qualifiers\n",
mode_string(var), var->name);
{
int *InterfaceBlockStageIndex[MESA_SHADER_STAGES];
struct gl_uniform_block *blks = NULL;
- unsigned *num_blks = validate_ssbo ? &prog->NumShaderStorageBlocks :
- &prog->NumUniformBlocks;
+ unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
+ &prog->data->NumUniformBlocks;
unsigned max_num_buffer_blocks = 0;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
if (stage_index != -1) {
struct gl_linked_shader *sh = prog->_LinkedShaders[i];
- blks[j].stageref |= (1 << i);
-
struct gl_uniform_block **sh_blks = validate_ssbo ?
sh->ShaderStorageBlocks : sh->UniformBlocks;
+ blks[j].stageref |= sh_blks[stage_index]->stageref;
sh_blks[stage_index] = &blks[j];
}
}
}
if (validate_ssbo)
- prog->ShaderStorageBlocks = blks;
+ prog->data->ShaderStorageBlocks = blks;
else
- prog->UniformBlocks = blks;
+ prog->data->UniformBlocks = blks;
return true;
}
* it inside that function leads to compiler warnings with some versions of
* gcc.
*/
-class array_sizing_visitor : public ir_hierarchical_visitor {
+class array_sizing_visitor : public deref_type_updater {
public:
array_sizing_visitor()
: mem_ctx(ralloc_context(NULL)),
}
glsl_interface_packing packing =
(glsl_interface_packing) type->interface_packing;
+ bool row_major = (bool) type->interface_row_major;
const glsl_type *new_ifc_type =
glsl_type::get_interface_instance(fields, num_fields,
- packing, type->name);
+ packing, row_major, type->name);
delete [] fields;
return new_ifc_type;
}
}
glsl_interface_packing packing =
(glsl_interface_packing) ifc_type->interface_packing;
+ bool row_major = (bool) ifc_type->interface_row_major;
const glsl_type *new_ifc_type =
glsl_type::get_interface_instance(fields, num_fields, packing,
- ifc_type->name);
+ row_major, ifc_type->name);
delete [] fields;
for (unsigned i = 0; i < num_fields; i++) {
if (interface_vars[i] != NULL)
linked_shader->info.BlendSupport = 0;
if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
- (prog->Version < 150 && !prog->ARB_fragment_coord_conventions_enable))
+ (prog->data->Version < 150 &&
+ !prog->ARB_fragment_coord_conventions_enable))
return;
for (unsigned i = 0; i < num_shaders; i++) {
linked_shader->info.EarlyFragmentTests |=
shader->info.EarlyFragmentTests;
+ linked_shader->info.InnerCoverage |=
+ shader->info.InnerCoverage;
+ linked_shader->Program->info.fs.post_depth_coverage |=
+ shader->info.PostDepthCoverage;
+
linked_shader->info.BlendSupport |= shader->info.BlendSupport;
}
}
/* No in/out qualifiers defined for anything but GLSL 1.50+
* geometry shaders so far.
*/
- if (linked_shader->Stage != MESA_SHADER_GEOMETRY || prog->Version < 150)
+ if (linked_shader->Stage != MESA_SHADER_GEOMETRY ||
+ prog->data->Version < 150)
return;
/* From the GLSL 1.50 spec, page 46:
for (int i = 0; i < 3; i++)
linked_shader->info.Comp.LocalSize[i] = 0;
+ linked_shader->info.Comp.LocalSizeVariable = false;
+
/* This function is called for all shader stages, but it only has an effect
* for compute shaders.
*/
linked_shader->info.Comp.LocalSize[i] =
shader->info.Comp.LocalSize[i];
}
+ } else if (shader->info.Comp.LocalSizeVariable) {
+ if (linked_shader->info.Comp.LocalSize[0] != 0) {
+ /* The ARB_compute_variable_group_size spec says:
+ *
+ * If one compute shader attached to a program declares a
+ * variable local group size and a second compute shader
+ * attached to the same program declares a fixed local group
+ * size, a link-time error results.
+ */
+ linker_error(prog, "compute shader defined with both fixed and "
+ "variable local group size\n");
+ return;
+ }
+ linked_shader->info.Comp.LocalSizeVariable = true;
}
}
* since we already know we're in the right type of shader program
* for doing it.
*/
- if (linked_shader->info.Comp.LocalSize[0] == 0) {
- linker_error(prog, "compute shader didn't declare local size\n");
+ if (linked_shader->info.Comp.LocalSize[0] == 0 &&
+ !linked_shader->info.Comp.LocalSizeVariable) {
+ linker_error(prog, "compute shader must contain a fixed or a variable "
+ "local group size\n");
return;
}
for (int i = 0; i < 3; i++)
prog->Comp.LocalSize[i] = linked_shader->info.Comp.LocalSize[i];
+
+ prog->Comp.LocalSizeVariable =
+ linked_shader->info.Comp.LocalSizeVariable;
}
* If this function is supplied a single shader, it is cloned, and the new
* shader is returned.
*/
-static struct gl_linked_shader *
+struct gl_linked_shader *
link_intrastage_shaders(void *mem_ctx,
struct gl_context *ctx,
struct gl_shader_program *prog,
struct gl_shader **shader_list,
- unsigned num_shaders)
+ unsigned num_shaders,
+ bool allow_missing_main)
{
struct gl_uniform_block *ubo_blocks = NULL;
struct gl_uniform_block *ssbo_blocks = NULL;
cross_validate_globals(prog, shader_list[i]->ir, &variables, false);
}
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
return NULL;
/* Check that interface blocks defined in multiple shaders are consistent.
*/
validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
num_shaders);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
return NULL;
/* Check that there is only a single definition of each function signature
}
}
+ if (main == NULL && allow_missing_main)
+ main = shader_list[0];
+
if (main == NULL) {
linker_error(prog, "%s shader lacks `main'\n",
_mesa_shader_stage_to_string(shader_list[0]->Stage));
}
gl_linked_shader *linked = ctx->Driver.NewShader(shader_list[0]->Stage);
+
+ /* Create program and attach it to the linked shader */
+ struct gl_program *gl_prog =
+ ctx->Driver.NewProgram(ctx,
+ _mesa_shader_stage_to_program(shader_list[0]->Stage),
+ prog->Name);
+ if (!gl_prog) {
+ prog->data->LinkStatus = false;
+ _mesa_delete_linked_shader(ctx, linked);
+ return NULL;
+ }
+
+ /* Don't use _mesa_reference_program() just take ownership */
+ linked->Program = gl_prog;
+
linked->ir = new(linked) exec_list;
clone_ir_list(mem_ctx, linked->ir, main->ir);
/* Move any instructions other than variable declarations or function
* declarations into main.
*/
- exec_node *insertion_point =
- move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
- linked);
+ if (main_sig != NULL) {
+ exec_node *insertion_point =
+ move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
+ linked);
- for (unsigned i = 0; i < num_shaders; i++) {
- if (shader_list[i] == main)
- continue;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == main)
+ continue;
- insertion_point = move_non_declarations(shader_list[i]->ir,
- insertion_point, true, linked);
+ insertion_point = move_non_declarations(shader_list[i]->ir,
+ insertion_point, true, linked);
+ }
}
if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
&num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
- if (!prog->LinkStatus) {
+ if (!prog->data->LinkStatus) {
_mesa_delete_linked_shader(ctx, linked);
return NULL;
}
if (ctx->Const.VertexID_is_zero_based)
lower_vertex_id(linked);
+#ifdef DEBUG
+ /* Compute the source checksum. */
+ linked->SourceChecksum = 0;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == NULL)
+ continue;
+ linked->SourceChecksum ^= shader_list[i]->SourceChecksum;
+ }
+#endif
+
return linked;
}
if (prog->_LinkedShaders[i] == NULL)
continue;
+ bool types_were_updated = false;
+
foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
ir_variable *const var = node->as_variable();
var->type = glsl_type::get_array_instance(var->type->fields.array,
size + 1);
- /* FINISHME: We should update the types of array
- * dereferences of this variable now.
- */
+ types_were_updated = true;
}
}
+
+ /* Update the types of dereferences in case we changed any. */
+ if (types_were_updated) {
+ deref_type_updater v;
+ v.run(prog->_LinkedShaders[i]->ir);
+ }
}
}
}
}
} else if (target_index == MESA_SHADER_FRAGMENT ||
- (prog->IsES && prog->Version >= 300)) {
+ (prog->IsES && prog->data->Version >= 300)) {
linker_error(prog, "overlapping location is assigned "
"to %s `%s' %d %d %d\n", string, var->name,
used_locations, use_mask, attr);
ctx->Const.MaxCombinedShaderStorageBlocks);
}
- for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
- if (prog->UniformBlocks[i].UniformBufferSize >
+ for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
+ if (prog->data->UniformBlocks[i].UniformBufferSize >
ctx->Const.MaxUniformBlockSize) {
linker_error(prog, "Uniform block %s too big (%d/%d)\n",
- prog->UniformBlocks[i].Name,
- prog->UniformBlocks[i].UniformBufferSize,
+ prog->data->UniformBlocks[i].Name,
+ prog->data->UniformBlocks[i].UniformBufferSize,
ctx->Const.MaxUniformBlockSize);
}
}
- for (unsigned i = 0; i < prog->NumShaderStorageBlocks; i++) {
- if (prog->ShaderStorageBlocks[i].UniformBufferSize >
+ for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
+ if (prog->data->ShaderStorageBlocks[i].UniformBufferSize >
ctx->Const.MaxShaderStorageBlockSize) {
linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
- prog->ShaderStorageBlocks[i].Name,
- prog->ShaderStorageBlocks[i].UniformBufferSize,
+ prog->data->ShaderStorageBlocks[i].Name,
+ prog->data->ShaderStorageBlocks[i].UniformBufferSize,
ctx->Const.MaxShaderStorageBlockSize);
}
}
static void
link_calculate_subroutine_compat(struct gl_shader_program *prog)
{
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- struct gl_linked_shader *sh = prog->_LinkedShaders[i];
- int count;
- if (!sh)
- continue;
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
- for (unsigned j = 0; j < sh->NumSubroutineUniformRemapTable; j++) {
- if (sh->SubroutineUniformRemapTable[j] == INACTIVE_UNIFORM_EXPLICIT_LOCATION)
+ for (unsigned j = 0; j < p->sh.NumSubroutineUniformRemapTable; j++) {
+ if (p->sh.SubroutineUniformRemapTable[j] == INACTIVE_UNIFORM_EXPLICIT_LOCATION)
continue;
- struct gl_uniform_storage *uni = sh->SubroutineUniformRemapTable[j];
+ struct gl_uniform_storage *uni = p->sh.SubroutineUniformRemapTable[j];
if (!uni)
continue;
- sh->NumSubroutineUniforms++;
- count = 0;
- if (sh->NumSubroutineFunctions == 0) {
+ int count = 0;
+ if (p->sh.NumSubroutineFunctions == 0) {
linker_error(prog, "subroutine uniform %s defined but no valid functions found\n", uni->type->name);
continue;
}
- for (unsigned f = 0; f < sh->NumSubroutineFunctions; f++) {
- struct gl_subroutine_function *fn = &sh->SubroutineFunctions[f];
+ for (unsigned f = 0; f < p->sh.NumSubroutineFunctions; f++) {
+ struct gl_subroutine_function *fn = &p->sh.SubroutineFunctions[f];
for (int k = 0; k < fn->num_compat_types; k++) {
if (fn->types[k] == uni->type) {
count++;
static void
check_subroutine_resources(struct gl_shader_program *prog)
{
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
- if (sh) {
- if (sh->NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS)
- linker_error(prog, "Too many %s shader subroutine uniforms\n",
- _mesa_shader_stage_to_string(i));
+ if (p->sh.NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS) {
+ linker_error(prog, "Too many %s shader subroutine uniforms\n",
+ _mesa_shader_stage_to_string(i));
}
}
}
static bool
reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
- struct gl_linked_shader *sh,
+ struct gl_program *p,
ir_variable *var)
{
unsigned slots = var->type->uniform_locations();
unsigned max_loc = var->data.location + slots - 1;
/* Resize remap table if locations do not fit in the current one. */
- if (max_loc + 1 > sh->NumSubroutineUniformRemapTable) {
- sh->SubroutineUniformRemapTable =
- reralloc(sh, sh->SubroutineUniformRemapTable,
+ if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
+ p->sh.SubroutineUniformRemapTable =
+ reralloc(p, p->sh.SubroutineUniformRemapTable,
gl_uniform_storage *,
max_loc + 1);
- if (!sh->SubroutineUniformRemapTable) {
+ if (!p->sh.SubroutineUniformRemapTable) {
linker_error(prog, "Out of memory during linking.\n");
return false;
}
/* Initialize allocated space. */
- for (unsigned i = sh->NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
- sh->SubroutineUniformRemapTable[i] = NULL;
+ for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
+ p->sh.SubroutineUniformRemapTable[i] = NULL;
- sh->NumSubroutineUniformRemapTable = max_loc + 1;
+ p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
}
for (unsigned i = 0; i < slots; i++) {
unsigned loc = var->data.location + i;
/* Check if location is already used. */
- if (sh->SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+ if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
/* ARB_explicit_uniform_location specification states:
* "No two subroutine uniform variables can have the same location
/* Initialize location as inactive before optimization
* rounds and location assignment.
*/
- sh->SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+ p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
}
return true;
}
unsigned entries_total = 0;
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
- if (!sh)
- continue;
-
- foreach_in_list(ir_instruction, node, sh->ir) {
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
ir_variable *var = node->as_variable();
if (!var || var->data.mode != ir_var_uniform)
continue;
if (var->data.explicit_location) {
bool ret = false;
if (var->type->without_array()->is_subroutine())
- ret = reserve_subroutine_explicit_locations(prog, sh, var);
+ ret = reserve_subroutine_explicit_locations(prog, p, var);
else {
int slots = reserve_explicit_locations(prog, uniform_map,
var);
if (type != GL_BUFFER_VARIABLE)
return true;
- for (unsigned i = 0; i < shProg->NumShaderStorageBlocks; i++) {
- const char *block_name = shProg->ShaderStorageBlocks[i].Name;
+ for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
+ const char *block_name = shProg->data->ShaderStorageBlocks[i].Name;
block_name_len = strlen(block_name);
const char *block_square_bracket = strchr(block_name, '[');
create_shader_variable(struct gl_shader_program *shProg,
const ir_variable *in,
const char *name, const glsl_type *type,
+ const glsl_type *interface_type,
bool use_implicit_location, int location,
const glsl_type *outermost_struct_type)
{
out->type = type;
out->outermost_struct_type = outermost_struct_type;
- out->interface_type = in->get_interface_type();
+ out->interface_type = interface_type;
out->component = in->data.location_frac;
out->index = in->data.index;
out->patch = in->data.patch;
return out;
}
+/* Return \p type rebuilt as an array of gl_MaxPatchVertices elements
+ * (same element type, new length), or NULL if \p type is NULL.  Used to
+ * expand implicitly-sized TCS output / TES input arrays back to their
+ * official size for program interface queries.
+ */
+static const glsl_type *
+resize_to_max_patch_vertices(const struct gl_context *ctx,
+                             const glsl_type *type)
+{
+   if (!type)
+      return NULL;
+
+   return glsl_type::get_array_instance(type->fields.array,
+                                        ctx->Const.MaxPatchVertices);
+}
+
static bool
-add_shader_variable(struct gl_shader_program *shProg, struct set *resource_set,
+add_shader_variable(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set,
unsigned stage_mask,
GLenum programInterface, ir_variable *var,
const char *name, const glsl_type *type,
bool use_implicit_location, int location,
const glsl_type *outermost_struct_type = NULL)
{
- const bool is_vertex_input =
- programInterface == GL_PROGRAM_INPUT &&
- stage_mask == MESA_SHADER_VERTEX;
+ const glsl_type *interface_type = var->get_interface_type();
+
+ if (outermost_struct_type == NULL) {
+ /* Unsized (non-patch) TCS output/TES input arrays are implicitly
+ * sized to gl_MaxPatchVertices. Internally, we shrink them to a
+ * smaller size.
+ *
+ * This can cause trouble with SSO programs. Since the TCS declares
+ * the number of output vertices, we can always shrink TCS output
+ * arrays. However, the TES might not be linked with a TCS, in
+ * which case it won't know the size of the patch. In other words,
+ * the TCS and TES may disagree on the (smaller) array sizes. This
+ * can result in the resource names differing across stages, causing
+ * SSO validation failures and other cascading issues.
+ *
+ * Expanding the array size to the full gl_MaxPatchVertices fixes
+ * these issues. It's also what program interface queries expect,
+ * as that is the official size of the array.
+ */
+ if (var->data.tess_varying_implicit_sized_array) {
+ type = resize_to_max_patch_vertices(ctx, type);
+ interface_type = resize_to_max_patch_vertices(ctx, interface_type);
+ }
+
+ if (var->data.from_named_ifc_block) {
+ const char *interface_name = interface_type->name;
+
+ if (interface_type->is_array()) {
+ /* Issue #16 of the ARB_program_interface_query spec says:
+ *
+ * "* If a variable is a member of an interface block without an
+ * instance name, it is enumerated using just the variable name.
+ *
+ * * If a variable is a member of an interface block with an
+ * instance name, it is enumerated as "BlockName.Member", where
+ * "BlockName" is the name of the interface block (not the
+ * instance name) and "Member" is the name of the variable."
+ *
+ * In particular, it indicates that it should be "BlockName",
+ * not "BlockName[array length]". The conformance suite and
+ * dEQP both require this behavior.
+ *
+ * Here, we unwrap the extra array level added by named interface
+ * block array lowering so we have the correct variable type. We
+ * also unwrap the interface type when constructing the name.
+ *
+ * We leave interface_type the same so that ES 3.x SSO pipeline
+ * validation can enforce the rules requiring array length to
+ * match on interface blocks.
+ */
+ type = type->fields.array;
+
+ interface_name = interface_type->fields.array->name;
+ }
+
+ name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
+ }
+ }
switch (type->base_type) {
case GLSL_TYPE_STRUCT: {
for (unsigned i = 0; i < type->length; i++) {
const struct glsl_struct_field *field = &type->fields.structure[i];
char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
- if (!add_shader_variable(shProg, resource_set,
+ if (!add_shader_variable(ctx, shProg, resource_set,
stage_mask, programInterface,
var, field_name, field->type,
use_implicit_location, field_location,
outermost_struct_type))
return false;
- field_location +=
- field->type->count_attribute_slots(is_vertex_input);
+ field_location += field->type->count_attribute_slots(false);
}
return true;
}
default: {
- /* Issue #16 of the ARB_program_interface_query spec says:
- *
- * "* If a variable is a member of an interface block without an
- * instance name, it is enumerated using just the variable name.
- *
- * * If a variable is a member of an interface block with an instance
- * name, it is enumerated as "BlockName.Member", where "BlockName" is
- * the name of the interface block (not the instance name) and
- * "Member" is the name of the variable."
- */
- const char *prefixed_name = (var->data.from_named_ifc_block &&
- !is_gl_identifier(var->name))
- ? ralloc_asprintf(shProg, "%s.%s", var->get_interface_type()->name,
- name)
- : name;
-
/* The ARB_program_interface_query spec says:
*
* "For an active variable declared as a single instance of a basic
* from the shader source."
*/
gl_shader_variable *sha_v =
- create_shader_variable(shProg, var, prefixed_name, type,
+ create_shader_variable(shProg, var, name, type, interface_type,
use_implicit_location, location,
outermost_struct_type);
if (!sha_v)
}
static bool
-add_interface_variables(struct gl_shader_program *shProg,
+add_interface_variables(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
struct set *resource_set,
unsigned stage, GLenum programInterface)
{
(stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
(stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
- if (!add_shader_variable(shProg, resource_set,
+ if (!add_shader_variable(ctx, shProg, resource_set,
1 << stage, programInterface,
var, var->name, var->type, vs_input_or_fs_output,
var->data.location - loc_bias))
}
static bool
-add_packed_varyings(struct gl_shader_program *shProg, struct set *resource_set,
+add_packed_varyings(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set,
int stage, GLenum type)
{
struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];
if (type == iface) {
const int stage_mask =
build_stageref(shProg, var->name, var->data.mode);
- if (!add_shader_variable(shProg, resource_set,
+ if (!add_shader_variable(ctx, shProg, resource_set,
stage_mask,
iface, var, var->name, var->type, false,
var->data.location - VARYING_SLOT_VAR0))
}
static bool
-add_fragdata_arrays(struct gl_shader_program *shProg, struct set *resource_set)
+add_fragdata_arrays(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set)
{
struct gl_linked_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
if (var) {
assert(var->data.mode == ir_var_shader_out);
- if (!add_shader_variable(shProg, resource_set,
+ if (!add_shader_variable(ctx, shProg, resource_set,
1 << MESA_SHADER_FRAGMENT,
GL_PROGRAM_OUTPUT, var, var->name, var->type,
true, var->data.location - FRAG_RESULT_DATA0))
char *var_name = get_top_level_name(uni->name);
char *interface_name =
get_top_level_name(uni->is_shader_storage ?
- shProg->ShaderStorageBlocks[block_index].Name :
- shProg->UniformBlocks[block_index].Name);
+ shProg->data->ShaderStorageBlocks[block_index].Name :
+ shProg->data->UniformBlocks[block_index].Name);
if (strcmp(var_name, interface_name) == 0) {
/* Deal with instanced array of SSBOs */
/* Program interface needs to expose varyings in case of SSO. */
if (shProg->SeparateShader) {
- if (!add_packed_varyings(shProg, resource_set,
+ if (!add_packed_varyings(ctx, shProg, resource_set,
input_stage, GL_PROGRAM_INPUT))
return;
- if (!add_packed_varyings(shProg, resource_set,
+ if (!add_packed_varyings(ctx, shProg, resource_set,
output_stage, GL_PROGRAM_OUTPUT))
return;
}
- if (!add_fragdata_arrays(shProg, resource_set))
+ if (!add_fragdata_arrays(ctx, shProg, resource_set))
return;
/* Add inputs and outputs to the resource list. */
- if (!add_interface_variables(shProg, resource_set,
+ if (!add_interface_variables(ctx, shProg, resource_set,
input_stage, GL_PROGRAM_INPUT))
return;
- if (!add_interface_variables(shProg, resource_set,
+ if (!add_interface_variables(ctx, shProg, resource_set,
output_stage, GL_PROGRAM_OUTPUT))
return;
}
/* Add uniforms from uniform storage. */
- for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
+ for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
/* Do not add uniforms internally used by Mesa. */
- if (shProg->UniformStorage[i].hidden)
+ if (shProg->data->UniformStorage[i].hidden)
continue;
uint8_t stageref =
- build_stageref(shProg, shProg->UniformStorage[i].name,
+ build_stageref(shProg, shProg->data->UniformStorage[i].name,
ir_var_uniform);
/* Add stagereferences for uniforms in a uniform block. */
- bool is_shader_storage = shProg->UniformStorage[i].is_shader_storage;
- int block_index = shProg->UniformStorage[i].block_index;
+ bool is_shader_storage =
+ shProg->data->UniformStorage[i].is_shader_storage;
+ int block_index = shProg->data->UniformStorage[i].block_index;
if (block_index != -1) {
stageref |= is_shader_storage ?
- shProg->ShaderStorageBlocks[block_index].stageref :
- shProg->UniformBlocks[block_index].stageref;
+ shProg->data->ShaderStorageBlocks[block_index].stageref :
+ shProg->data->UniformBlocks[block_index].stageref;
}
GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
if (!should_add_buffer_variable(shProg, type,
- shProg->UniformStorage[i].name))
+ shProg->data->UniformStorage[i].name))
continue;
if (is_shader_storage) {
- calculate_array_size_and_stride(shProg, &shProg->UniformStorage[i]);
+ calculate_array_size_and_stride(shProg,
+ &shProg->data->UniformStorage[i]);
}
if (!add_program_resource(shProg, resource_set, type,
- &shProg->UniformStorage[i], stageref))
+ &shProg->data->UniformStorage[i], stageref))
return;
}
/* Add program uniform blocks. */
- for (unsigned i = 0; i < shProg->NumUniformBlocks; i++) {
+ for (unsigned i = 0; i < shProg->data->NumUniformBlocks; i++) {
if (!add_program_resource(shProg, resource_set, GL_UNIFORM_BLOCK,
- &shProg->UniformBlocks[i], 0))
+ &shProg->data->UniformBlocks[i], 0))
return;
}
/* Add program shader storage blocks. */
- for (unsigned i = 0; i < shProg->NumShaderStorageBlocks; i++) {
+ for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
if (!add_program_resource(shProg, resource_set, GL_SHADER_STORAGE_BLOCK,
- &shProg->ShaderStorageBlocks[i], 0))
+ &shProg->data->ShaderStorageBlocks[i], 0))
return;
}
/* Add atomic counter buffers. */
- for (unsigned i = 0; i < shProg->NumAtomicBuffers; i++) {
+ for (unsigned i = 0; i < shProg->data->NumAtomicBuffers; i++) {
if (!add_program_resource(shProg, resource_set, GL_ATOMIC_COUNTER_BUFFER,
- &shProg->AtomicBuffers[i], 0))
+ &shProg->data->AtomicBuffers[i], 0))
return;
}
- for (unsigned i = 0; i < shProg->NumUniformStorage; i++) {
+ for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
GLenum type;
- if (!shProg->UniformStorage[i].hidden)
+ if (!shProg->data->UniformStorage[i].hidden)
continue;
for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
- if (!shProg->UniformStorage[i].opaque[j].active ||
- !shProg->UniformStorage[i].type->is_subroutine())
+ if (!shProg->data->UniformStorage[i].opaque[j].active ||
+ !shProg->data->UniformStorage[i].type->is_subroutine())
continue;
type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
/* add shader subroutines */
if (!add_program_resource(shProg, resource_set,
- type, &shProg->UniformStorage[i], 0))
+ type, &shProg->data->UniformStorage[i], 0))
return;
}
}
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
- GLuint type;
-
- if (!sh)
- continue;
+ unsigned mask = shProg->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = shProg->_LinkedShaders[i]->Program;
- type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
- for (unsigned j = 0; j < sh->NumSubroutineFunctions; j++) {
+ GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
+ for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
if (!add_program_resource(shProg, resource_set,
- type, &sh->SubroutineFunctions[j], 0))
+ type, &p->sh.SubroutineFunctions[j], 0))
return;
}
}
"expressions is forbidden in GLSL %s %u";
/* Backend has indicated that it has no dynamic indexing support. */
if (no_dynamic_indexing) {
- linker_error(prog, msg, prog->IsES ? "ES" : "", prog->Version);
+ linker_error(prog, msg, prog->IsES ? "ES" : "",
+ prog->data->Version);
return false;
} else {
- linker_warning(prog, msg, prog->IsES ? "ES" : "", prog->Version);
+ linker_warning(prog, msg, prog->IsES ? "ES" : "",
+ prog->data->Version);
}
}
}
static void
link_assign_subroutine_types(struct gl_shader_program *prog)
{
- for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
- gl_linked_shader *sh = prog->_LinkedShaders[i];
-
- if (sh == NULL)
- continue;
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ gl_program *p = prog->_LinkedShaders[i]->Program;
- sh->MaxSubroutineFunctionIndex = 0;
- foreach_in_list(ir_instruction, node, sh->ir) {
+ p->sh.MaxSubroutineFunctionIndex = 0;
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
ir_function *fn = node->as_function();
if (!fn)
continue;
if (fn->is_subroutine)
- sh->NumSubroutineUniformTypes++;
+ p->sh.NumSubroutineUniformTypes++;
if (!fn->num_subroutine_types)
continue;
/* these should have been calculated earlier. */
assert(fn->subroutine_index != -1);
- if (sh->NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
+ if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
linker_error(prog, "Too many subroutine functions declared.\n");
return;
}
- sh->SubroutineFunctions = reralloc(sh, sh->SubroutineFunctions,
+ p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
struct gl_subroutine_function,
- sh->NumSubroutineFunctions + 1);
- sh->SubroutineFunctions[sh->NumSubroutineFunctions].name = ralloc_strdup(sh, fn->name);
- sh->SubroutineFunctions[sh->NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
- sh->SubroutineFunctions[sh->NumSubroutineFunctions].types =
- ralloc_array(sh, const struct glsl_type *,
+ p->sh.NumSubroutineFunctions + 1);
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name = ralloc_strdup(p, fn->name);
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
+ ralloc_array(p, const struct glsl_type *,
fn->num_subroutine_types);
/* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
* given a unique index, otherwise a compile or link error will be
* generated."
*/
- for (unsigned j = 0; j < sh->NumSubroutineFunctions; j++) {
- if (sh->SubroutineFunctions[j].index != -1 &&
- sh->SubroutineFunctions[j].index == fn->subroutine_index) {
+ for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
+ if (p->sh.SubroutineFunctions[j].index != -1 &&
+ p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
linker_error(prog, "each subroutine index qualifier in the "
"shader must be unique\n");
return;
}
}
- sh->SubroutineFunctions[sh->NumSubroutineFunctions].index =
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
fn->subroutine_index;
- if (fn->subroutine_index > (int)sh->MaxSubroutineFunctionIndex)
- sh->MaxSubroutineFunctionIndex = fn->subroutine_index;
+ if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
+ p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
for (int j = 0; j < fn->num_subroutine_types; j++)
- sh->SubroutineFunctions[sh->NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
- sh->NumSubroutineFunctions++;
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
+ p->sh.NumSubroutineFunctions++;
}
}
}
return false;
}
- tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
- num_tfeedback_decls);
+ tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
+ num_tfeedback_decls);
if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
varying_names, tfeedback_decls))
return false;
check_image_resources(ctx, prog);
link_check_atomic_counter_resources(ctx, prog);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
return false;
for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
lower_ubo_reference(prog->_LinkedShaders[i],
options->ClampBlockIndicesToArrayBounds);
- if (options->LowerShaderSharedVariables)
+ if (i == MESA_SHADER_COMPUTE)
lower_shared_reference(prog->_LinkedShaders[i],
&prog->Comp.SharedSize);
void
link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
{
- prog->LinkStatus = true; /* All error paths will set this to false */
- prog->Validated = false;
- prog->_Used = false;
+ prog->data->LinkStatus = true; /* All error paths will set this to false */
+ prog->data->Validated = false;
/* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
*
goto done;
}
- prog->Version = max_version;
+ prog->data->Version = max_version;
prog->IsES = prog->Shaders[0]->IsES;
/* Some shaders have to be linked with some other shaders present.
"type of shader\n");
}
- for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
- if (prog->_LinkedShaders[i] != NULL) {
- _mesa_delete_linked_shader(ctx, prog->_LinkedShaders[i]);
- }
-
- prog->_LinkedShaders[i] = NULL;
- }
-
/* Link all shaders for a particular stage and validate the result.
*/
for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
if (num_shaders[stage] > 0) {
gl_linked_shader *const sh =
link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
- num_shaders[stage]);
+ num_shaders[stage], false);
- if (!prog->LinkStatus) {
+ if (!prog->data->LinkStatus) {
if (sh)
_mesa_delete_linked_shader(ctx, sh);
goto done;
validate_fragment_shader_executable(prog, sh);
break;
}
- if (!prog->LinkStatus) {
+ if (!prog->data->LinkStatus) {
if (sh)
_mesa_delete_linked_shader(ctx, sh);
goto done;
}
prog->_LinkedShaders[stage] = sh;
+ prog->data->linked_stages |= 1 << stage;
}
}
* varyings.
*/
cross_validate_uniforms(prog);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
unsigned first, last, prev;
num_explicit_uniform_locs = check_explicit_uniform_locations(ctx, prog);
link_assign_subroutine_types(prog);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
resize_tes_inputs(ctx, prog);
validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
prog->_LinkedShaders[i]);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
cross_validate_outputs_to_inputs(prog,
prog->_LinkedShaders[prev],
prog->_LinkedShaders[i]);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
prev = i;
/* Cross-validate uniform blocks between shader stages */
validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
continue;
detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
- if (!prog->LinkStatus)
+ if (!prog->data->LinkStatus)
goto done;
if (ctx->Const.ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
* with loop induction variable. This check emits a warning or error
* depending if backend can handle dynamic indexing.
*/
- if ((!prog->IsES && prog->Version < 130) ||
- (prog->IsES && prog->Version < 300)) {
+ if ((!prog->IsES && prog->data->Version < 130) ||
+ (prog->IsES && prog->data->Version < 300)) {
if (!validate_sampler_array_indexing(ctx, prog))
goto done;
}