X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fglsl%2Flinker.cpp;h=0c4467779707fcc7b9fb90cb982ef051a242869f;hb=5f72847a885518eacafc05d10e1cb52b978ba061;hp=9a018774f80f086be95f768afdbf8319fc835bd2;hpb=35f11e85cbe82b4bb77535e84e5515a5c49f67a6;p=mesa.git diff --git a/src/glsl/linker.cpp b/src/glsl/linker.cpp index 9a018774f80..0c446777970 100644 --- a/src/glsl/linker.cpp +++ b/src/glsl/linker.cpp @@ -64,6 +64,7 @@ * \author Ian Romanick */ +#include #include "main/core.h" #include "glsl_symbol_table.h" #include "glsl_parser_extras.h" @@ -74,11 +75,11 @@ #include "link_varyings.h" #include "ir_optimization.h" #include "ir_rvalue_visitor.h" +#include "ir_uniform.h" -extern "C" { #include "main/shaderobj.h" #include "main/enums.h" -} + void linker_error(gl_shader_program *, const char *, ...); @@ -249,31 +250,100 @@ public: } }; - /** - * Visitor that determines whether or not a shader uses ir_end_primitive. + * Visitor that determines the highest stream id to which a (geometry) shader + * emits vertices. It also checks whether End{Stream}Primitive is ever called. */ -class find_end_primitive_visitor : public ir_hierarchical_visitor { +class find_emit_vertex_visitor : public ir_hierarchical_visitor { public: - find_end_primitive_visitor() - : found(false) + find_emit_vertex_visitor(int max_allowed) + : max_stream_allowed(max_allowed), + invalid_stream_id(0), + invalid_stream_id_from_emit_vertex(false), + end_primitive_found(false), + uses_non_zero_stream(false) { /* empty */ } - virtual ir_visitor_status visit(ir_end_primitive *) + virtual ir_visitor_status visit_leave(ir_emit_vertex *ir) { - found = true; - return visit_stop; + int stream_id = ir->stream_id(); + + if (stream_id < 0) { + invalid_stream_id = stream_id; + invalid_stream_id_from_emit_vertex = true; + return visit_stop; + } + + if (stream_id > max_stream_allowed) { + invalid_stream_id = stream_id; + invalid_stream_id_from_emit_vertex = true; + return visit_stop; + } + + if (stream_id != 0) + uses_non_zero_stream = true; + + return visit_continue; } - bool end_primitive_found() + virtual ir_visitor_status visit_leave(ir_end_primitive *ir) { - return found; + end_primitive_found = true; + + int stream_id = ir->stream_id(); + + if (stream_id < 0) { + invalid_stream_id = stream_id; + invalid_stream_id_from_emit_vertex = false; + return visit_stop; + } + + if (stream_id > max_stream_allowed) { + invalid_stream_id = stream_id; + invalid_stream_id_from_emit_vertex = false; + return visit_stop; + } + + if (stream_id != 0) + uses_non_zero_stream = true; + + return visit_continue; + } + + bool error() + { + return invalid_stream_id != 0; + } + + const char *error_func() + { + return invalid_stream_id_from_emit_vertex ? 
+ "EmitStreamVertex" : "EndStreamPrimitive"; + } + + int error_stream() + { + return invalid_stream_id; + } + + bool uses_streams() + { + return uses_non_zero_stream; + } + + bool uses_end_primitive() + { + return end_primitive_found; } private: - bool found; + int max_stream_allowed; + int invalid_stream_id; + bool invalid_stream_id_from_emit_vertex; + bool end_primitive_found; + bool uses_non_zero_stream; }; } /* anonymous namespace */ @@ -367,8 +437,8 @@ parse_program_resource_name(const GLchar *name, void link_invalidate_variable_locations(exec_list *ir) { - foreach_list(node, ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, ir) { + ir_variable *const var = node->as_variable(); if (var == NULL) continue; @@ -489,14 +559,21 @@ validate_vertex_shader_executable(struct gl_shader_program *prog, * vertex processing has occurred. Its value is undefined if * the vertex shader executable does not write gl_Position." * - * GLSL ES 3.00 is similar to GLSL 1.40--failing to write to gl_Position is - * not an error. + * All GLSL ES Versions are similar to GLSL 1.40--failing to write to + * gl_Position is not an error. */ if (prog->Version < (prog->IsES ? 300 : 140)) { find_assignment_visitor find("gl_Position"); find.run(shader->ir); if (!find.variable_found()) { - linker_error(prog, "vertex shader does not write to `gl_Position'\n"); + if (prog->IsES) { + linker_warning(prog, + "vertex shader does not write to `gl_Position'." + "It's value is undefined. \n"); + } else { + linker_error(prog, + "vertex shader does not write to `gl_Position'. \n"); + } return; } } @@ -550,10 +627,97 @@ validate_geometry_shader_executable(struct gl_shader_program *prog, analyze_clip_usage(prog, shader, &prog->Geom.UsesClipDistance, &prog->Geom.ClipDistanceArraySize); +} - find_end_primitive_visitor end_primitive; - end_primitive.run(shader->ir); - prog->Geom.UsesEndPrimitive = end_primitive.end_primitive_found(); +/** + * Check if geometry shaders emit to non-zero streams and do corresponding + * validations. + */ +static void +validate_geometry_shader_emissions(struct gl_context *ctx, + struct gl_shader_program *prog) +{ + if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) { + find_emit_vertex_visitor emit_vertex(ctx->Const.MaxVertexStreams - 1); + emit_vertex.run(prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir); + if (emit_vertex.error()) { + linker_error(prog, "Invalid call %s(%d). Accepted values for the " + "stream parameter are in the range [0, %d].\n", + emit_vertex.error_func(), + emit_vertex.error_stream(), + ctx->Const.MaxVertexStreams - 1); + } + prog->Geom.UsesStreams = emit_vertex.uses_streams(); + prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive(); + + /* From the ARB_gpu_shader5 spec: + * + * "Multiple vertex streams are supported only if the output primitive + * type is declared to be "points". A program will fail to link if it + * contains a geometry shader calling EmitStreamVertex() or + * EndStreamPrimitive() if its output primitive type is not "points". + * + * However, in the same spec: + * + * "The function EmitVertex() is equivalent to calling EmitStreamVertex() + * with set to zero." + * + * And: + * + * "The function EndPrimitive() is equivalent to calling + * EndStreamPrimitive() with set to zero." + * + * Since we can call EmitVertex() and EndPrimitive() when we output + * primitives other than points, calling EmitStreamVertex(0) or + * EmitEndPrimitive(0) should not produce errors. 
This it also what Nvidia + * does. Currently we only set prog->Geom.UsesStreams to TRUE when + * EmitStreamVertex() or EmitEndPrimitive() are called with a non-zero + * stream. + */ + if (prog->Geom.UsesStreams && prog->Geom.OutputType != GL_POINTS) { + linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) " + "with n>0 requires point output\n"); + } + } +} + +bool +validate_intrastage_arrays(struct gl_shader_program *prog, + ir_variable *const var, + ir_variable *const existing) +{ + /* Consider the types to be "the same" if both types are arrays + * of the same type and one of the arrays is implicitly sized. + * In addition, set the type of the linked variable to the + * explicitly sized array. + */ + if (var->type->is_array() && existing->type->is_array() && + (var->type->fields.array == existing->type->fields.array) && + ((var->type->length == 0)|| (existing->type->length == 0))) { + if (var->type->length != 0) { + if (var->type->length <= existing->data.max_array_access) { + linker_error(prog, "%s `%s' declared as type " + "`%s' but outermost dimension has an index" + " of `%i'\n", + mode_string(var), + var->name, var->type->name, + existing->data.max_array_access); + } + existing->type = var->type; + return true; + } else if (existing->type->length != 0) { + if(existing->type->length <= var->data.max_array_access) { + linker_error(prog, "%s `%s' declared as type " + "`%s' but outermost dimension has an index" + " of `%i'\n", + mode_string(var), + var->name, existing->type->name, + var->data.max_array_access); + } + return true; + } + } + return false; } @@ -574,8 +738,8 @@ cross_validate_globals(struct gl_shader_program *prog, if (shader_list[i] == NULL) continue; - foreach_list(node, shader_list[i]->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, shader_list[i]->ir) { + ir_variable *const var = node->as_variable(); if (var == NULL) continue; @@ -595,31 +759,23 @@ cross_validate_globals(struct gl_shader_program *prog, */ ir_variable *const existing = variables.get_variable(var->name); if (existing != NULL) { - if (var->type != existing->type) { - /* Consider the types to be "the same" if both types are arrays - * of the same type and one of the arrays is implicitly sized. - * In addition, set the type of the linked variable to the - * explicitly sized array. - */ - if (var->type->is_array() - && existing->type->is_array() - && (var->type->fields.array == existing->type->fields.array) - && ((var->type->length == 0) - || (existing->type->length == 0))) { - if (var->type->length != 0) { - existing->type = var->type; - } - } else if (var->type->is_record() - && existing->type->is_record() - && existing->type->record_compare(var->type)) { - existing->type = var->type; - } else { - linker_error(prog, "%s `%s' declared as type " - "`%s' and type `%s'\n", - mode_string(var), - var->name, var->type->name, - existing->type->name); - return; + /* Check if types match. Interface blocks have some special + * rules so we handle those elsewhere. 
+ */ + if (var->type != existing->type && + !var->is_interface_instance()) { + if (!validate_intrastage_arrays(prog, var, existing)) { + if (var->type->is_record() && existing->type->is_record() + && existing->type->record_compare(var->type)) { + existing->type = var->type; + } else { + linker_error(prog, "%s `%s' declared as type " + "`%s' and type `%s'\n", + mode_string(var), + var->name, var->type->name, + existing->type->name); + return; + } } } @@ -683,7 +839,7 @@ cross_validate_globals(struct gl_shader_program *prog, linker_error(prog, "All redeclarations of gl_FragDepth in all " "fragment shaders in a single program must have " - "the same set of qualifiers."); + "the same set of qualifiers.\n"); } if (var->data.used && layout_differs) { @@ -692,7 +848,7 @@ cross_validate_globals(struct gl_shader_program *prog, "qualifier in any fragment shader, it must be " "redeclared with the same layout qualifier in " "all fragment shaders that have assignments to " - "gl_FragDepth"); + "gl_FragDepth\n"); } } @@ -823,7 +979,7 @@ interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog) &sh->UniformBlocks[j]); if (index == -1) { - linker_error(prog, "uniform block `%s' has mismatching definitions", + linker_error(prog, "uniform block `%s' has mismatching definitions\n", sh->UniformBlocks[j].Name); return false; } @@ -844,15 +1000,15 @@ populate_symbol_table(gl_shader *sh) { sh->symbols = new(sh) glsl_symbol_table; - foreach_list(node, sh->ir) { - ir_instruction *const inst = (ir_instruction *) node; + foreach_in_list(ir_instruction, inst, sh->ir) { ir_variable *var; ir_function *func; if ((func = inst->as_function()) != NULL) { sh->symbols->add_function(func); } else if ((var = inst->as_variable()) != NULL) { - sh->symbols->add_variable(var); + if (var->data.mode != ir_var_temporary) + sh->symbols->add_variable(var); } } } @@ -961,9 +1117,7 @@ move_non_declarations(exec_list *instructions, exec_node *last, temps = hash_table_ctor(0, hash_table_pointer_hash, hash_table_pointer_compare); - foreach_list_safe(node, instructions) { - ir_instruction *inst = (ir_instruction *) node; - + foreach_in_list_safe(ir_instruction, inst, instructions) { if (inst->as_function()) continue; @@ -1000,8 +1154,8 @@ move_non_declarations(exec_list *instructions, exec_node *last, /** * Get the function signature for main from a shader */ -static ir_function_signature * -get_main_function_signature(gl_shader *sh) +ir_function_signature * +link_get_main_function_signature(gl_shader *sh) { ir_function *const f = sh->symbols->get_function("main"); if (f != NULL) { @@ -1014,7 +1168,8 @@ get_main_function_signature(gl_shader *sh) * We don't have to check for multiple definitions of main (in multiple * shaders) because that would have already been caught above. 
*/ - ir_function_signature *sig = f->matching_signature(NULL, &void_parameters); + ir_function_signature *sig = + f->matching_signature(NULL, &void_parameters, false); if ((sig != NULL) && sig->is_defined) { return sig; } @@ -1050,7 +1205,8 @@ public: if (var->type->is_interface()) { if (interface_contains_unsized_arrays(var->type)) { const glsl_type *new_type = - resize_interface_members(var->type, var->max_ifc_array_access); + resize_interface_members(var->type, + var->get_max_ifc_array_access()); var->type = new_type; var->change_interface_type(new_type); } @@ -1059,7 +1215,7 @@ public: if (interface_contains_unsized_arrays(var->type->fields.array)) { const glsl_type *new_type = resize_interface_members(var->type->fields.array, - var->max_ifc_array_access); + var->get_max_ifc_array_access()); var->change_interface_type(new_type); var->type = glsl_type::get_array_instance(new_type, var->type->length); @@ -1210,7 +1366,8 @@ link_fs_input_layout_qualifiers(struct gl_shader_program *prog, linked_shader->origin_upper_left = false; linked_shader->pixel_center_integer = false; - if (linked_shader->Stage != MESA_SHADER_FRAGMENT || prog->Version < 150) + if (linked_shader->Stage != MESA_SHADER_FRAGMENT || + (prog->Version < 150 && !prog->ARB_fragment_coord_conventions_enable)) return; for (unsigned i = 0; i < num_shaders; i++) { @@ -1477,13 +1634,15 @@ link_intrastage_shaders(void *mem_ctx, const unsigned num_uniform_blocks = link_uniform_blocks(mem_ctx, prog, shader_list, num_shaders, &uniform_blocks); + if (!prog->LinkStatus) + return NULL; /* Check that there is only a single definition of each function signature * across all shaders. */ for (unsigned i = 0; i < (num_shaders - 1); i++) { - foreach_list(node, shader_list[i]->ir) { - ir_function *const f = ((ir_instruction *) node)->as_function(); + foreach_in_list(ir_instruction, node, shader_list[i]->ir) { + ir_function *const f = node->as_function(); if (f == NULL) continue; @@ -1498,9 +1657,7 @@ link_intrastage_shaders(void *mem_ctx, if (other == NULL) continue; - foreach_list(n, &f->signatures) { - ir_function_signature *sig = (ir_function_signature *) n; - + foreach_in_list(ir_function_signature, sig, &f->signatures) { if (!sig->is_defined || sig->is_builtin()) continue; @@ -1509,7 +1666,7 @@ link_intrastage_shaders(void *mem_ctx, if ((other_sig != NULL) && other_sig->is_defined && !other_sig->is_builtin()) { - linker_error(prog, "function `%s' is multiply defined", + linker_error(prog, "function `%s' is multiply defined\n", f->name); return NULL; } @@ -1527,7 +1684,7 @@ link_intrastage_shaders(void *mem_ctx, */ gl_shader *main = NULL; for (unsigned i = 0; i < num_shaders; i++) { - if (get_main_function_signature(shader_list[i]) != NULL) { + if (link_get_main_function_signature(shader_list[i]) != NULL) { main = shader_list[i]; break; } @@ -1553,10 +1710,11 @@ link_intrastage_shaders(void *mem_ctx, populate_symbol_table(linked); - /* The a pointer to the main function in the final linked shader (i.e., the + /* The pointer to the main function in the final linked shader (i.e., the * copy of the original shader that contained the main function). */ - ir_function_signature *const main_sig = get_main_function_signature(linked); + ir_function_signature *const main_sig = + link_get_main_function_signature(linked); /* Move any instructions other than variable declarations or function * declarations into main. 
@@ -1589,12 +1747,19 @@ link_intrastage_shaders(void *mem_ctx, */ gl_shader **linking_shaders = (gl_shader **) calloc(num_shaders + 1, sizeof(gl_shader *)); - memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *)); - linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader(); - ok = link_function_calls(prog, linked, linking_shaders, num_shaders + 1); + ok = linking_shaders != NULL; + + if (ok) { + memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *)); + linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader(); + + ok = link_function_calls(prog, linked, linking_shaders, num_shaders + 1); - free(linking_shaders); + free(linking_shaders); + } else { + _mesa_error_no_memory(__func__); + } } else { ok = link_function_calls(prog, linked, shader_list, num_shaders); } @@ -1614,12 +1779,14 @@ link_intrastage_shaders(void *mem_ctx, if (linked->Stage == MESA_SHADER_GEOMETRY) { unsigned num_vertices = vertices_per_prim(prog->Geom.InputType); geom_array_resize_visitor input_resize_visitor(num_vertices, prog); - foreach_list(n, linked->ir) { - ir_instruction *ir = (ir_instruction *) n; + foreach_in_list(ir_instruction, ir, linked->ir) { ir->accept(&input_resize_visitor); } } + if (ctx->Const.VertexID_is_zero_based) + lower_vertex_id(linked); + /* Make a pass over all variable declarations to ensure that arrays with * unspecified sizes have a size specified. The size is inferred from the * max_array_access field. @@ -1653,8 +1820,8 @@ update_array_sizes(struct gl_shader_program *prog) if (prog->_LinkedShaders[i] == NULL) continue; - foreach_list(node, prog->_LinkedShaders[i]->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) { + ir_variable *const var = node->as_variable(); if ((var == NULL) || (var->data.mode != ir_var_uniform) || !var->type->is_array()) @@ -1676,8 +1843,8 @@ update_array_sizes(struct gl_shader_program *prog) if (prog->_LinkedShaders[j] == NULL) continue; - foreach_list(node2, prog->_LinkedShaders[j]->ir) { - ir_variable *other_var = ((ir_instruction *) node2)->as_variable(); + foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) { + ir_variable *other_var = node2->as_variable(); if (!other_var) continue; @@ -1697,9 +1864,10 @@ update_array_sizes(struct gl_shader_program *prog) * Determine the number of slots per array element by dividing by * the old (total) size. 
*/ - if (var->num_state_slots > 0) { - var->num_state_slots = (size + 1) - * (var->num_state_slots / var->type->length); + const unsigned num_slots = var->get_num_state_slots(); + if (num_slots > 0) { + var->set_num_state_slots((size + 1) + * (num_slots / var->type->length)); } var->type = glsl_type::get_array_instance(var->type->fields.array, @@ -1745,7 +1913,7 @@ find_available_slots(unsigned used_mask, unsigned needed_count) /** - * Assign locations for either VS inputs for FS outputs + * Assign locations for either VS inputs or FS outputs * * \param prog Shader program whose variables need locations assigned * \param target_index Selector for the program target to receive location @@ -1819,8 +1987,8 @@ assign_attribute_or_color_locations(gl_shader_program *prog, unsigned num_attr = 0; - foreach_list(node, sh->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *const var = node->as_variable(); if ((var == NULL) || (var->data.mode != (unsigned) direction)) continue; @@ -1949,7 +2117,7 @@ assign_attribute_or_color_locations(gl_shader_program *prog, if (attr + slots > max_index) { linker_error(prog, "insufficient contiguous locations " - "available for %s `%s' %d %d %d", string, + "available for %s `%s' %d %d %d\n", string, var->name, used_locations, use_mask, attr); return false; } @@ -2018,7 +2186,7 @@ assign_attribute_or_color_locations(gl_shader_program *prog, linker_error(prog, "insufficient contiguous locations " - "available for %s `%s'", + "available for %s `%s'\n", string, to_assign[i].var->name); return false; } @@ -2038,8 +2206,8 @@ assign_attribute_or_color_locations(gl_shader_program *prog, void demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode) { - foreach_list(node, sh->ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *const var = node->as_variable(); if ((var == NULL) || (var->data.mode != int(mode))) continue; @@ -2049,6 +2217,7 @@ demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode) * to have a location assigned. */ if (var->data.is_unmatched_generic_inout) { + assert(var->data.mode != ir_var_temporary); var->data.mode = ir_var_auto; } } @@ -2074,8 +2243,8 @@ store_fragdepth_layout(struct gl_shader_program *prog) * We're only interested in the cases where the variable is NOT removed * from the IR. 
*/ - foreach_list(node, ir) { - ir_variable *const var = ((ir_instruction *) node)->as_variable(); + foreach_in_list(ir_instruction, node, ir) { + ir_variable *const var = node->as_variable(); if (var == NULL || var->data.mode != ir_var_shader_out) { continue; @@ -2119,7 +2288,7 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog) continue; if (sh->num_samplers > ctx->Const.Program[i].MaxTextureImageUnits) { - linker_error(prog, "Too many %s shader texture samplers", + linker_error(prog, "Too many %s shader texture samplers\n", _mesa_shader_stage_to_string(i)); } @@ -2133,7 +2302,7 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog) _mesa_shader_stage_to_string(i)); } else { linker_error(prog, "Too many %s shader default uniform block " - "components", + "components\n", _mesa_shader_stage_to_string(i)); } } @@ -2146,7 +2315,7 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog) "this is non-portable out-of-spec behavior\n", _mesa_shader_stage_to_string(i)); } else { - linker_error(prog, "Too many %s shader uniform components", + linker_error(prog, "Too many %s shader uniform components\n", _mesa_shader_stage_to_string(i)); } } @@ -2164,7 +2333,7 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog) } if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) { - linker_error(prog, "Too many combined uniform blocks (%d/%d)", + linker_error(prog, "Too many combined uniform blocks (%d/%d)\n", prog->NumUniformBlocks, ctx->Const.MaxCombinedUniformBlocks); } else { @@ -2172,7 +2341,7 @@ check_resources(struct gl_context *ctx, struct gl_shader_program *prog) const unsigned max_uniform_blocks = ctx->Const.Program[i].MaxUniformBlocks; if (blocks[i] > max_uniform_blocks) { - linker_error(prog, "Too many %s uniform blocks (%d/%d)", + linker_error(prog, "Too many %s uniform blocks (%d/%d)\n", _mesa_shader_stage_to_string(i), blocks[i], max_uniform_blocks); @@ -2200,14 +2369,14 @@ check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog) if (sh) { if (sh->NumImages > ctx->Const.Program[i].MaxImageUniforms) - linker_error(prog, "Too many %s shader image uniforms", + linker_error(prog, "Too many %s shader image uniforms\n", _mesa_shader_stage_to_string(i)); total_image_units += sh->NumImages; if (i == MESA_SHADER_FRAGMENT) { - foreach_list(node, sh->ir) { - ir_variable *var = ((ir_instruction *)node)->as_variable(); + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *var = node->as_variable(); if (var && var->data.mode == ir_var_shader_out) fragment_outputs += var->type->count_attribute_slots(); } @@ -2216,11 +2385,122 @@ check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog) } if (total_image_units > ctx->Const.MaxCombinedImageUniforms) - linker_error(prog, "Too many combined image uniforms"); + linker_error(prog, "Too many combined image uniforms\n"); if (total_image_units + fragment_outputs > ctx->Const.MaxCombinedImageUnitsAndFragmentOutputs) - linker_error(prog, "Too many combined image uniforms and fragment outputs"); + linker_error(prog, "Too many combined image uniforms and fragment outputs\n"); +} + + +/** + * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION + * for a variable, checks for overlaps between other uniforms using explicit + * locations. 
+ */ +static bool +reserve_explicit_locations(struct gl_shader_program *prog, + string_to_uint_map *map, ir_variable *var) +{ + unsigned slots = var->type->uniform_locations(); + unsigned max_loc = var->data.location + slots - 1; + + /* Resize remap table if locations do not fit in the current one. */ + if (max_loc + 1 > prog->NumUniformRemapTable) { + prog->UniformRemapTable = + reralloc(prog, prog->UniformRemapTable, + gl_uniform_storage *, + max_loc + 1); + + if (!prog->UniformRemapTable) { + linker_error(prog, "Out of memory during linking.\n"); + return false; + } + + /* Initialize allocated space. */ + for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++) + prog->UniformRemapTable[i] = NULL; + + prog->NumUniformRemapTable = max_loc + 1; + } + + for (unsigned i = 0; i < slots; i++) { + unsigned loc = var->data.location + i; + + /* Check if location is already used. */ + if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) { + + /* Possibly same uniform from a different stage, this is ok. */ + unsigned hash_loc; + if (map->get(hash_loc, var->name) && hash_loc == loc - i) + continue; + + /* ARB_explicit_uniform_location specification states: + * + * "No two default-block uniform variables in the program can have + * the same location, even if they are unused, otherwise a compiler + * or linker error will be generated." + */ + linker_error(prog, + "location qualifier for uniform %s overlaps " + "previously used location\n", + var->name); + return false; + } + + /* Initialize location as inactive before optimization + * rounds and location assignment. + */ + prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION; + } + + /* Note, base location used for arrays. */ + map->put(var->data.location, var->name); + + return true; +} + +/** + * Check and reserve all explicit uniform locations, called before + * any optimizations happen to handle also inactive uniforms and + * inactive array elements that may get trimmed away. + */ +static void +check_explicit_uniform_locations(struct gl_context *ctx, + struct gl_shader_program *prog) +{ + if (!ctx->Extensions.ARB_explicit_uniform_location) + return; + + /* This map is used to detect if overlapping explicit locations + * occur with the same uniform (from different stage) or a different one. 
+ */ + string_to_uint_map *uniform_map = new string_to_uint_map; + + if (!uniform_map) { + linker_error(prog, "Out of memory during linking.\n"); + return; + } + + for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { + struct gl_shader *sh = prog->_LinkedShaders[i]; + + if (!sh) + continue; + + foreach_in_list(ir_instruction, node, sh->ir) { + ir_variable *var = node->as_variable(); + if ((var && var->data.mode == ir_var_uniform) && + var->data.explicit_location) { + if (!reserve_explicit_locations(prog, uniform_map, var)) { + delete uniform_map; + return; + } + } + } + } + + delete uniform_map; } void @@ -2235,20 +2515,7 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) prog->Validated = false; prog->_Used = false; - ralloc_free(prog->InfoLog); - prog->InfoLog = ralloc_strdup(NULL, ""); - - ralloc_free(prog->UniformBlocks); - prog->UniformBlocks = NULL; - prog->NumUniformBlocks = 0; - for (int i = 0; i < MESA_SHADER_STAGES; i++) { - ralloc_free(prog->UniformBlockStageIndex[i]); - prog->UniformBlockStageIndex[i] = NULL; - } - - ralloc_free(prog->AtomicBuffers); - prog->AtomicBuffers = NULL; - prog->NumAtomicBuffers = 0; + prog->ARB_fragment_coord_conventions_enable = false; /* Separate the shaders into groups based on their type. */ @@ -2275,6 +2542,9 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) goto done; } + prog->ARB_fragment_coord_conventions_enable |= + prog->Shaders[i]->ARB_fragment_coord_conventions_enable; + gl_shader_stage shader_type = prog->Shaders[i]->Stage; shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i]; num_shaders[shader_type]++; @@ -2295,7 +2565,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) /* Geometry shaders have to be linked with vertex shaders. */ if (num_shaders[MESA_SHADER_GEOMETRY] > 0 && - num_shaders[MESA_SHADER_VERTEX] == 0) { + num_shaders[MESA_SHADER_VERTEX] == 0 && + !prog->SeparateShader) { linker_error(prog, "Geometry shader must be linked with " "vertex shader\n"); goto done; @@ -2366,6 +2637,10 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) break; } + check_explicit_uniform_locations(ctx, prog); + if (!prog->LinkStatus) + goto done; + /* Validate the inputs of each stage with the output of the preceding * stage. */ @@ -2426,16 +2701,21 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) if (!prog->LinkStatus) goto done; - if (ctx->ShaderCompilerOptions[i].LowerClipDistance) { + if (ctx->Const.ShaderCompilerOptions[i].LowerClipDistance) { lower_clip_distance(prog->_LinkedShaders[i]); } while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false, - &ctx->ShaderCompilerOptions[i], + &ctx->Const.ShaderCompilerOptions[i], ctx->Const.NativeIntegers)) ; + + lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir); } + /* Check and validate stream emissions in geometry shaders */ + validate_geometry_shader_emissions(ctx, prog); + /* Mark all generic shader inputs and outputs as unpaired. 
*/ for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) { if (prog->_LinkedShaders[i] != NULL) { @@ -2472,7 +2752,7 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) */ if (first == MESA_SHADER_FRAGMENT) { linker_error(prog, "Transform feedback varyings specified, but " - "no vertex or geometry shader is present."); + "no vertex or geometry shader is present.\n"); goto done; } @@ -2497,7 +2777,22 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) if (last >= 0 && last < MESA_SHADER_FRAGMENT) { gl_shader *const sh = prog->_LinkedShaders[last]; - if (num_tfeedback_decls != 0) { + if (first == MESA_SHADER_GEOMETRY) { + /* There was no vertex shader, but we still have to assign varying + * locations for use by geometry shader inputs in SSO. + * + * If the shader is not separable (i.e., prog->SeparateShader is + * false), linking will have already failed when first is + * MESA_SHADER_GEOMETRY. + */ + if (!assign_varying_locations(ctx, mem_ctx, prog, + NULL, sh, + num_tfeedback_decls, tfeedback_decls, + prog->Geom.VerticesIn)) + goto done; + } + + if (num_tfeedback_decls != 0 || prog->SeparateShader) { /* There was no fragment shader, but we still have to assign varying * locations for use by transform feedback. */ @@ -2511,7 +2806,8 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls, tfeedback_decls); - demote_shader_inputs_and_outputs(sh, ir_var_shader_out); + if (!prog->SeparateShader) + demote_shader_inputs_and_outputs(sh, ir_var_shader_out); /* Eliminate code that is now dead due to unused outputs being demoted. */ @@ -2526,7 +2822,16 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) do_dead_builtin_varyings(ctx, NULL, sh, num_tfeedback_decls, tfeedback_decls); - demote_shader_inputs_and_outputs(sh, ir_var_shader_in); + if (prog->SeparateShader) { + if (!assign_varying_locations(ctx, mem_ctx, prog, + NULL /* producer */, + sh /* consumer */, + 0 /* num_tfeedback_decls */, + NULL /* tfeedback_decls */, + 0 /* gs_input_vertices */)) + goto done; + } else + demote_shader_inputs_and_outputs(sh, ir_var_shader_in); while (do_dead_code(sh->ir, false)) ; @@ -2574,7 +2879,7 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) goto done; update_array_sizes(prog); - link_assign_uniform_locations(prog); + link_assign_uniform_locations(prog, ctx->Const.UniformBooleanTrue); link_assign_atomic_counter_resources(ctx, prog); store_fragdepth_layout(prog); @@ -2591,7 +2896,7 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog) * fragment shader) is absent. So, the extension shouldn't change the * behavior specified in GLSL specification. */ - if (!prog->InternalSeparateShader && ctx->API == API_OPENGLES2) { + if (!prog->SeparateShader && ctx->API == API_OPENGLES2) { if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) { linker_error(prog, "program lacks a vertex shader\n"); } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {