2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
26 * GLSL linker implementation
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
 35 * - Undefined references in each shader are resolved to definitions in
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
53 * In the final stage individual shader executables are linked to create a
 54 * complete executable.
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
64 * \author Ian Romanick <ian.d.romanick@intel.com>
68 #include "util/strndup.h"
69 #include "main/core.h"
70 #include "glsl_symbol_table.h"
71 #include "glsl_parser_extras.h"
74 #include "program/hash_table.h"
76 #include "link_varyings.h"
77 #include "ir_optimization.h"
78 #include "ir_rvalue_visitor.h"
79 #include "ir_uniform.h"
81 #include "main/shaderobj.h"
82 #include "main/enums.h"
85 void linker_error(gl_shader_program
*, const char *, ...);
90 * Visitor that determines whether or not a variable is ever written.
92 class find_assignment_visitor
: public ir_hierarchical_visitor
{
94 find_assignment_visitor(const char *name
)
95 : name(name
), found(false)
100 virtual ir_visitor_status
visit_enter(ir_assignment
*ir
)
102 ir_variable
*const var
= ir
->lhs
->variable_referenced();
104 if (strcmp(name
, var
->name
) == 0) {
109 return visit_continue_with_parent
;
112 virtual ir_visitor_status
visit_enter(ir_call
*ir
)
114 foreach_two_lists(formal_node
, &ir
->callee
->parameters
,
115 actual_node
, &ir
->actual_parameters
) {
116 ir_rvalue
*param_rval
= (ir_rvalue
*) actual_node
;
117 ir_variable
*sig_param
= (ir_variable
*) formal_node
;
119 if (sig_param
->data
.mode
== ir_var_function_out
||
120 sig_param
->data
.mode
== ir_var_function_inout
) {
121 ir_variable
*var
= param_rval
->variable_referenced();
122 if (var
&& strcmp(name
, var
->name
) == 0) {
129 if (ir
->return_deref
!= NULL
) {
130 ir_variable
*const var
= ir
->return_deref
->variable_referenced();
132 if (strcmp(name
, var
->name
) == 0) {
138 return visit_continue_with_parent
;
141 bool variable_found()
147 const char *name
; /**< Find writes to a variable with this name. */
148 bool found
; /**< Was a write to the variable found? */
153 * Visitor that determines whether or not a variable is ever read.
155 class find_deref_visitor
: public ir_hierarchical_visitor
{
157 find_deref_visitor(const char *name
)
158 : name(name
), found(false)
163 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
165 if (strcmp(this->name
, ir
->var
->name
) == 0) {
170 return visit_continue
;
173 bool variable_found() const
179 const char *name
; /**< Find writes to a variable with this name. */
180 bool found
; /**< Was a write to the variable found? */
184 class geom_array_resize_visitor
: public ir_hierarchical_visitor
{
186 unsigned num_vertices
;
187 gl_shader_program
*prog
;
189 geom_array_resize_visitor(unsigned num_vertices
, gl_shader_program
*prog
)
191 this->num_vertices
= num_vertices
;
195 virtual ~geom_array_resize_visitor()
200 virtual ir_visitor_status
visit(ir_variable
*var
)
202 if (!var
->type
->is_array() || var
->data
.mode
!= ir_var_shader_in
)
203 return visit_continue
;
205 unsigned size
= var
->type
->length
;
207 /* Generate a link error if the shader has declared this array with an
210 if (size
&& size
!= this->num_vertices
) {
211 linker_error(this->prog
, "size of array %s declared as %u, "
212 "but number of input vertices is %u\n",
213 var
->name
, size
, this->num_vertices
);
214 return visit_continue
;
217 /* Generate a link error if the shader attempts to access an input
218 * array using an index too large for its actual size assigned at link
221 if (var
->data
.max_array_access
>= this->num_vertices
) {
222 linker_error(this->prog
, "geometry shader accesses element %i of "
223 "%s, but only %i input vertices\n",
224 var
->data
.max_array_access
, var
->name
, this->num_vertices
);
225 return visit_continue
;
228 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
230 var
->data
.max_array_access
= this->num_vertices
- 1;
232 return visit_continue
;
235 /* Dereferences of input variables need to be updated so that their type
236 * matches the newly assigned type of the variable they are accessing. */
237 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
239 ir
->type
= ir
->var
->type
;
240 return visit_continue
;
243 /* Dereferences of 2D input arrays need to be updated so that their type
244 * matches the newly assigned type of the array they are accessing. */
245 virtual ir_visitor_status
visit_leave(ir_dereference_array
*ir
)
247 const glsl_type
*const vt
= ir
->array
->type
;
249 ir
->type
= vt
->fields
.array
;
250 return visit_continue
;
254 class tess_eval_array_resize_visitor
: public ir_hierarchical_visitor
{
256 unsigned num_vertices
;
257 gl_shader_program
*prog
;
259 tess_eval_array_resize_visitor(unsigned num_vertices
, gl_shader_program
*prog
)
261 this->num_vertices
= num_vertices
;
265 virtual ~tess_eval_array_resize_visitor()
270 virtual ir_visitor_status
visit(ir_variable
*var
)
272 if (!var
->type
->is_array() || var
->data
.mode
!= ir_var_shader_in
|| var
->data
.patch
)
273 return visit_continue
;
275 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
277 var
->data
.max_array_access
= this->num_vertices
- 1;
279 return visit_continue
;
282 /* Dereferences of input variables need to be updated so that their type
283 * matches the newly assigned type of the variable they are accessing. */
284 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
286 ir
->type
= ir
->var
->type
;
287 return visit_continue
;
290 /* Dereferences of 2D input arrays need to be updated so that their type
291 * matches the newly assigned type of the array they are accessing. */
292 virtual ir_visitor_status
visit_leave(ir_dereference_array
*ir
)
294 const glsl_type
*const vt
= ir
->array
->type
;
296 ir
->type
= vt
->fields
.array
;
297 return visit_continue
;
301 class barrier_use_visitor
: public ir_hierarchical_visitor
{
303 barrier_use_visitor(gl_shader_program
*prog
)
304 : prog(prog
), in_main(false), after_return(false), control_flow(0)
308 virtual ~barrier_use_visitor()
313 virtual ir_visitor_status
visit_enter(ir_function
*ir
)
315 if (strcmp(ir
->name
, "main") == 0)
318 return visit_continue
;
321 virtual ir_visitor_status
visit_leave(ir_function
*)
324 after_return
= false;
325 return visit_continue
;
328 virtual ir_visitor_status
visit_leave(ir_return
*)
331 return visit_continue
;
334 virtual ir_visitor_status
visit_enter(ir_if
*)
337 return visit_continue
;
340 virtual ir_visitor_status
visit_leave(ir_if
*)
343 return visit_continue
;
346 virtual ir_visitor_status
visit_enter(ir_loop
*)
349 return visit_continue
;
352 virtual ir_visitor_status
visit_leave(ir_loop
*)
355 return visit_continue
;
358 /* FINISHME: `switch` is not expressed at the IR level -- it's already
359 * been lowered to a mess of `if`s. We'll correctly disallow any use of
360 * barrier() in a conditional path within the switch, but not in a path
361 * which is always hit.
364 virtual ir_visitor_status
visit_enter(ir_call
*ir
)
366 if (ir
->use_builtin
&& strcmp(ir
->callee_name(), "barrier") == 0) {
367 /* Use of barrier(); determine if it is legal: */
369 linker_error(prog
, "Builtin barrier() may only be used in main");
374 linker_error(prog
, "Builtin barrier() may not be used after return");
378 if (control_flow
!= 0) {
379 linker_error(prog
, "Builtin barrier() may not be used inside control flow");
383 return visit_continue
;
387 gl_shader_program
*prog
;
388 bool in_main
, after_return
;
393 * Visitor that determines the highest stream id to which a (geometry) shader
394 * emits vertices. It also checks whether End{Stream}Primitive is ever called.
396 class find_emit_vertex_visitor
: public ir_hierarchical_visitor
{
398 find_emit_vertex_visitor(int max_allowed
)
399 : max_stream_allowed(max_allowed
),
400 invalid_stream_id(0),
401 invalid_stream_id_from_emit_vertex(false),
402 end_primitive_found(false),
403 uses_non_zero_stream(false)
408 virtual ir_visitor_status
visit_leave(ir_emit_vertex
*ir
)
410 int stream_id
= ir
->stream_id();
413 invalid_stream_id
= stream_id
;
414 invalid_stream_id_from_emit_vertex
= true;
418 if (stream_id
> max_stream_allowed
) {
419 invalid_stream_id
= stream_id
;
420 invalid_stream_id_from_emit_vertex
= true;
425 uses_non_zero_stream
= true;
427 return visit_continue
;
430 virtual ir_visitor_status
visit_leave(ir_end_primitive
*ir
)
432 end_primitive_found
= true;
434 int stream_id
= ir
->stream_id();
437 invalid_stream_id
= stream_id
;
438 invalid_stream_id_from_emit_vertex
= false;
442 if (stream_id
> max_stream_allowed
) {
443 invalid_stream_id
= stream_id
;
444 invalid_stream_id_from_emit_vertex
= false;
449 uses_non_zero_stream
= true;
451 return visit_continue
;
456 return invalid_stream_id
!= 0;
459 const char *error_func()
461 return invalid_stream_id_from_emit_vertex
?
462 "EmitStreamVertex" : "EndStreamPrimitive";
467 return invalid_stream_id
;
472 return uses_non_zero_stream
;
475 bool uses_end_primitive()
477 return end_primitive_found
;
481 int max_stream_allowed
;
482 int invalid_stream_id
;
483 bool invalid_stream_id_from_emit_vertex
;
484 bool end_primitive_found
;
485 bool uses_non_zero_stream
;
488 /* Class that finds array derefs and check if indexes are dynamic. */
489 class dynamic_sampler_array_indexing_visitor
: public ir_hierarchical_visitor
492 dynamic_sampler_array_indexing_visitor() :
493 dynamic_sampler_array_indexing(false)
497 ir_visitor_status
visit_enter(ir_dereference_array
*ir
)
499 if (!ir
->variable_referenced())
500 return visit_continue
;
502 if (!ir
->variable_referenced()->type
->contains_sampler())
503 return visit_continue
;
505 if (!ir
->array_index
->constant_expression_value()) {
506 dynamic_sampler_array_indexing
= true;
509 return visit_continue
;
512 bool uses_dynamic_sampler_array_indexing()
514 return dynamic_sampler_array_indexing
;
518 bool dynamic_sampler_array_indexing
;
521 } /* anonymous namespace */
524 linker_error(gl_shader_program
*prog
, const char *fmt
, ...)
528 ralloc_strcat(&prog
->InfoLog
, "error: ");
530 ralloc_vasprintf_append(&prog
->InfoLog
, fmt
, ap
);
533 prog
->LinkStatus
= false;
538 linker_warning(gl_shader_program
*prog
, const char *fmt
, ...)
542 ralloc_strcat(&prog
->InfoLog
, "warning: ");
544 ralloc_vasprintf_append(&prog
->InfoLog
, fmt
, ap
);
551 * Given a string identifying a program resource, break it into a base name
552 * and an optional array index in square brackets.
554 * If an array index is present, \c out_base_name_end is set to point to the
555 * "[" that precedes the array index, and the array index itself is returned
558 * If no array index is present (or if the array index is negative or
559 * mal-formed), \c out_base_name_end, is set to point to the null terminator
560 * at the end of the input string, and -1 is returned.
562 * Only the final array index is parsed; if the string contains other array
563 * indices (or structure field accesses), they are left in the base name.
565 * No attempt is made to check that the base name is properly formed;
566 * typically the caller will look up the base name in a hash table, so
567 * ill-formed base names simply turn into hash table lookup failures.
570 parse_program_resource_name(const GLchar
*name
,
571 const GLchar
**out_base_name_end
)
573 /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
575 * "When an integer array element or block instance number is part of
576 * the name string, it will be specified in decimal form without a "+"
577 * or "-" sign or any extra leading zeroes. Additionally, the name
578 * string will not include white space anywhere in the string."
581 const size_t len
= strlen(name
);
582 *out_base_name_end
= name
+ len
;
584 if (len
== 0 || name
[len
-1] != ']')
587 /* Walk backwards over the string looking for a non-digit character. This
588 * had better be the opening bracket for an array index.
590 * Initially, i specifies the location of the ']'. Since the string may
591 * contain only the ']' charcater, walk backwards very carefully.
594 for (i
= len
- 1; (i
> 0) && isdigit(name
[i
-1]); --i
)
597 if ((i
== 0) || name
[i
-1] != '[')
600 long array_index
= strtol(&name
[i
], NULL
, 10);
604 /* Check for leading zero */
605 if (name
[i
] == '0' && name
[i
+1] != ']')
608 *out_base_name_end
= name
+ (i
- 1);
614 link_invalidate_variable_locations(exec_list
*ir
)
616 foreach_in_list(ir_instruction
, node
, ir
) {
617 ir_variable
*const var
= node
->as_variable();
622 /* Only assign locations for variables that lack an explicit location.
623 * Explicit locations are set for all built-in variables, generic vertex
624 * shader inputs (via layout(location=...)), and generic fragment shader
625 * outputs (also via layout(location=...)).
627 if (!var
->data
.explicit_location
) {
628 var
->data
.location
= -1;
629 var
->data
.location_frac
= 0;
632 /* ir_variable::is_unmatched_generic_inout is used by the linker while
633 * connecting outputs from one stage to inputs of the next stage.
635 if (var
->data
.explicit_location
&&
636 var
->data
.location
< VARYING_SLOT_VAR0
) {
637 var
->data
.is_unmatched_generic_inout
= 0;
639 var
->data
.is_unmatched_generic_inout
= 1;
646 * Set clip_distance_array_size based on the given shader.
648 * Also check for errors based on incorrect usage of gl_ClipVertex and
651 * Return false if an error was reported.
654 analyze_clip_usage(struct gl_shader_program
*prog
,
655 struct gl_shader
*shader
,
656 GLuint
*clip_distance_array_size
)
658 *clip_distance_array_size
= 0;
660 if (!prog
->IsES
&& prog
->Version
>= 130) {
661 /* From section 7.1 (Vertex Shader Special Variables) of the
664 * "It is an error for a shader to statically write both
665 * gl_ClipVertex and gl_ClipDistance."
667 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
668 * gl_ClipVertex nor gl_ClipDistance.
670 find_assignment_visitor
clip_vertex("gl_ClipVertex");
671 find_assignment_visitor
clip_distance("gl_ClipDistance");
673 clip_vertex
.run(shader
->ir
);
674 clip_distance
.run(shader
->ir
);
675 if (clip_vertex
.variable_found() && clip_distance
.variable_found()) {
676 linker_error(prog
, "%s shader writes to both `gl_ClipVertex' "
677 "and `gl_ClipDistance'\n",
678 _mesa_shader_stage_to_string(shader
->Stage
));
682 if (clip_distance
.variable_found()) {
683 ir_variable
*clip_distance_var
=
684 shader
->symbols
->get_variable("gl_ClipDistance");
686 assert(clip_distance_var
);
687 *clip_distance_array_size
= clip_distance_var
->type
->length
;
694 * Verify that a vertex shader executable meets all semantic requirements.
696 * Also sets prog->Vert.ClipDistanceArraySize as a side effect.
698 * \param shader Vertex shader executable to be verified
701 validate_vertex_shader_executable(struct gl_shader_program
*prog
,
702 struct gl_shader
*shader
)
707 /* From the GLSL 1.10 spec, page 48:
709 * "The variable gl_Position is available only in the vertex
710 * language and is intended for writing the homogeneous vertex
711 * position. All executions of a well-formed vertex shader
712 * executable must write a value into this variable. [...] The
713 * variable gl_Position is available only in the vertex
714 * language and is intended for writing the homogeneous vertex
715 * position. All executions of a well-formed vertex shader
716 * executable must write a value into this variable."
718 * while in GLSL 1.40 this text is changed to:
720 * "The variable gl_Position is available only in the vertex
721 * language and is intended for writing the homogeneous vertex
722 * position. It can be written at any time during shader
723 * execution. It may also be read back by a vertex shader
724 * after being written. This value will be used by primitive
725 * assembly, clipping, culling, and other fixed functionality
726 * operations, if present, that operate on primitives after
727 * vertex processing has occurred. Its value is undefined if
728 * the vertex shader executable does not write gl_Position."
730 * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
731 * gl_Position is not an error.
733 if (prog
->Version
< (prog
->IsES
? 300 : 140)) {
734 find_assignment_visitor
find("gl_Position");
735 find
.run(shader
->ir
);
736 if (!find
.variable_found()) {
739 "vertex shader does not write to `gl_Position'."
740 "It's value is undefined. \n");
743 "vertex shader does not write to `gl_Position'. \n");
749 analyze_clip_usage(prog
, shader
, &prog
->Vert
.ClipDistanceArraySize
);
753 validate_tess_eval_shader_executable(struct gl_shader_program
*prog
,
754 struct gl_shader
*shader
)
759 analyze_clip_usage(prog
, shader
, &prog
->TessEval
.ClipDistanceArraySize
);
764 * Verify that a fragment shader executable meets all semantic requirements
766 * \param shader Fragment shader executable to be verified
769 validate_fragment_shader_executable(struct gl_shader_program
*prog
,
770 struct gl_shader
*shader
)
775 find_assignment_visitor
frag_color("gl_FragColor");
776 find_assignment_visitor
frag_data("gl_FragData");
778 frag_color
.run(shader
->ir
);
779 frag_data
.run(shader
->ir
);
781 if (frag_color
.variable_found() && frag_data
.variable_found()) {
782 linker_error(prog
, "fragment shader writes to both "
783 "`gl_FragColor' and `gl_FragData'\n");
788 * Verify that a geometry shader executable meets all semantic requirements
790 * Also sets prog->Geom.VerticesIn, and prog->Geom.ClipDistanceArraySize as
793 * \param shader Geometry shader executable to be verified
796 validate_geometry_shader_executable(struct gl_shader_program
*prog
,
797 struct gl_shader
*shader
)
802 unsigned num_vertices
= vertices_per_prim(prog
->Geom
.InputType
);
803 prog
->Geom
.VerticesIn
= num_vertices
;
805 analyze_clip_usage(prog
, shader
, &prog
->Geom
.ClipDistanceArraySize
);
809 * Check if geometry shaders emit to non-zero streams and do corresponding
813 validate_geometry_shader_emissions(struct gl_context
*ctx
,
814 struct gl_shader_program
*prog
)
816 if (prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
] != NULL
) {
817 find_emit_vertex_visitor
emit_vertex(ctx
->Const
.MaxVertexStreams
- 1);
818 emit_vertex
.run(prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
]->ir
);
819 if (emit_vertex
.error()) {
820 linker_error(prog
, "Invalid call %s(%d). Accepted values for the "
821 "stream parameter are in the range [0, %d].\n",
822 emit_vertex
.error_func(),
823 emit_vertex
.error_stream(),
824 ctx
->Const
.MaxVertexStreams
- 1);
826 prog
->Geom
.UsesStreams
= emit_vertex
.uses_streams();
827 prog
->Geom
.UsesEndPrimitive
= emit_vertex
.uses_end_primitive();
829 /* From the ARB_gpu_shader5 spec:
831 * "Multiple vertex streams are supported only if the output primitive
832 * type is declared to be "points". A program will fail to link if it
833 * contains a geometry shader calling EmitStreamVertex() or
834 * EndStreamPrimitive() if its output primitive type is not "points".
836 * However, in the same spec:
838 * "The function EmitVertex() is equivalent to calling EmitStreamVertex()
839 * with <stream> set to zero."
843 * "The function EndPrimitive() is equivalent to calling
844 * EndStreamPrimitive() with <stream> set to zero."
846 * Since we can call EmitVertex() and EndPrimitive() when we output
847 * primitives other than points, calling EmitStreamVertex(0) or
848 * EmitEndPrimitive(0) should not produce errors. This it also what Nvidia
849 * does. Currently we only set prog->Geom.UsesStreams to TRUE when
850 * EmitStreamVertex() or EmitEndPrimitive() are called with a non-zero
853 if (prog
->Geom
.UsesStreams
&& prog
->Geom
.OutputType
!= GL_POINTS
) {
854 linker_error(prog
, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
855 "with n>0 requires point output\n");
861 validate_intrastage_arrays(struct gl_shader_program
*prog
,
862 ir_variable
*const var
,
863 ir_variable
*const existing
)
865 /* Consider the types to be "the same" if both types are arrays
866 * of the same type and one of the arrays is implicitly sized.
867 * In addition, set the type of the linked variable to the
868 * explicitly sized array.
870 if (var
->type
->is_array() && existing
->type
->is_array()) {
871 if ((var
->type
->fields
.array
== existing
->type
->fields
.array
) &&
872 ((var
->type
->length
== 0)|| (existing
->type
->length
== 0))) {
873 if (var
->type
->length
!= 0) {
874 if (var
->type
->length
<= existing
->data
.max_array_access
) {
875 linker_error(prog
, "%s `%s' declared as type "
876 "`%s' but outermost dimension has an index"
879 var
->name
, var
->type
->name
,
880 existing
->data
.max_array_access
);
882 existing
->type
= var
->type
;
884 } else if (existing
->type
->length
!= 0) {
885 if(existing
->type
->length
<= var
->data
.max_array_access
&&
886 !existing
->data
.from_ssbo_unsized_array
) {
887 linker_error(prog
, "%s `%s' declared as type "
888 "`%s' but outermost dimension has an index"
891 var
->name
, existing
->type
->name
,
892 var
->data
.max_array_access
);
897 /* The arrays of structs could have different glsl_type pointers but
898 * they are actually the same type. Use record_compare() to check that.
900 if (existing
->type
->fields
.array
->is_record() &&
901 var
->type
->fields
.array
->is_record() &&
902 existing
->type
->fields
.array
->record_compare(var
->type
->fields
.array
))
911 * Perform validation of global variables used across multiple shaders
914 cross_validate_globals(struct gl_shader_program
*prog
,
915 struct gl_shader
**shader_list
,
916 unsigned num_shaders
,
919 /* Examine all of the uniforms in all of the shaders and cross validate
922 glsl_symbol_table variables
;
923 for (unsigned i
= 0; i
< num_shaders
; i
++) {
924 if (shader_list
[i
] == NULL
)
927 foreach_in_list(ir_instruction
, node
, shader_list
[i
]->ir
) {
928 ir_variable
*const var
= node
->as_variable();
933 if (uniforms_only
&& (var
->data
.mode
!= ir_var_uniform
&& var
->data
.mode
!= ir_var_shader_storage
))
936 /* don't cross validate subroutine uniforms */
937 if (var
->type
->contains_subroutine())
940 /* Don't cross validate temporaries that are at global scope. These
941 * will eventually get pulled into the shaders 'main'.
943 if (var
->data
.mode
== ir_var_temporary
)
946 /* If a global with this name has already been seen, verify that the
947 * new instance has the same type. In addition, if the globals have
948 * initializers, the values of the initializers must be the same.
950 ir_variable
*const existing
= variables
.get_variable(var
->name
);
951 if (existing
!= NULL
) {
952 /* Check if types match. Interface blocks have some special
953 * rules so we handle those elsewhere.
955 if (var
->type
!= existing
->type
&&
956 !var
->is_interface_instance()) {
957 if (!validate_intrastage_arrays(prog
, var
, existing
)) {
958 if (var
->type
->is_record() && existing
->type
->is_record()
959 && existing
->type
->record_compare(var
->type
)) {
960 existing
->type
= var
->type
;
962 /* If it is an unsized array in a Shader Storage Block,
963 * two different shaders can access to different elements.
964 * Because of that, they might be converted to different
965 * sized arrays, then check that they are compatible but
966 * ignore the array size.
968 if (!(var
->data
.mode
== ir_var_shader_storage
&&
969 var
->data
.from_ssbo_unsized_array
&&
970 existing
->data
.mode
== ir_var_shader_storage
&&
971 existing
->data
.from_ssbo_unsized_array
&&
972 var
->type
->gl_type
== existing
->type
->gl_type
)) {
973 linker_error(prog
, "%s `%s' declared as type "
974 "`%s' and type `%s'\n",
976 var
->name
, var
->type
->name
,
977 existing
->type
->name
);
984 if (var
->data
.explicit_location
) {
985 if (existing
->data
.explicit_location
986 && (var
->data
.location
!= existing
->data
.location
)) {
987 linker_error(prog
, "explicit locations for %s "
988 "`%s' have differing values\n",
989 mode_string(var
), var
->name
);
993 existing
->data
.location
= var
->data
.location
;
994 existing
->data
.explicit_location
= true;
996 /* Check if uniform with implicit location was marked explicit
997 * by earlier shader stage. If so, mark it explicit in this stage
998 * too to make sure later processing does not treat it as
1001 if (existing
->data
.explicit_location
) {
1002 var
->data
.location
= existing
->data
.location
;
1003 var
->data
.explicit_location
= true;
1007 /* From the GLSL 4.20 specification:
1008 * "A link error will result if two compilation units in a program
1009 * specify different integer-constant bindings for the same
1010 * opaque-uniform name. However, it is not an error to specify a
1011 * binding on some but not all declarations for the same name"
1013 if (var
->data
.explicit_binding
) {
1014 if (existing
->data
.explicit_binding
&&
1015 var
->data
.binding
!= existing
->data
.binding
) {
1016 linker_error(prog
, "explicit bindings for %s "
1017 "`%s' have differing values\n",
1018 mode_string(var
), var
->name
);
1022 existing
->data
.binding
= var
->data
.binding
;
1023 existing
->data
.explicit_binding
= true;
1026 if (var
->type
->contains_atomic() &&
1027 var
->data
.offset
!= existing
->data
.offset
) {
1028 linker_error(prog
, "offset specifications for %s "
1029 "`%s' have differing values\n",
1030 mode_string(var
), var
->name
);
1034 /* Validate layout qualifiers for gl_FragDepth.
1036 * From the AMD/ARB_conservative_depth specs:
1038 * "If gl_FragDepth is redeclared in any fragment shader in a
1039 * program, it must be redeclared in all fragment shaders in
1040 * that program that have static assignments to
1041 * gl_FragDepth. All redeclarations of gl_FragDepth in all
1042 * fragment shaders in a single program must have the same set
1045 if (strcmp(var
->name
, "gl_FragDepth") == 0) {
1046 bool layout_declared
= var
->data
.depth_layout
!= ir_depth_layout_none
;
1047 bool layout_differs
=
1048 var
->data
.depth_layout
!= existing
->data
.depth_layout
;
1050 if (layout_declared
&& layout_differs
) {
1052 "All redeclarations of gl_FragDepth in all "
1053 "fragment shaders in a single program must have "
1054 "the same set of qualifiers.\n");
1057 if (var
->data
.used
&& layout_differs
) {
1059 "If gl_FragDepth is redeclared with a layout "
1060 "qualifier in any fragment shader, it must be "
1061 "redeclared with the same layout qualifier in "
1062 "all fragment shaders that have assignments to "
1067 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
1069 * "If a shared global has multiple initializers, the
1070 * initializers must all be constant expressions, and they
1071 * must all have the same value. Otherwise, a link error will
1072 * result. (A shared global having only one initializer does
1073 * not require that initializer to be a constant expression.)"
1075 * Previous to 4.20 the GLSL spec simply said that initializers
1076 * must have the same value. In this case of non-constant
1077 * initializers, this was impossible to determine. As a result,
1078 * no vendor actually implemented that behavior. The 4.20
1079 * behavior matches the implemented behavior of at least one other
1080 * vendor, so we'll implement that for all GLSL versions.
1082 if (var
->constant_initializer
!= NULL
) {
1083 if (existing
->constant_initializer
!= NULL
) {
1084 if (!var
->constant_initializer
->has_value(existing
->constant_initializer
)) {
1085 linker_error(prog
, "initializers for %s "
1086 "`%s' have differing values\n",
1087 mode_string(var
), var
->name
);
1091 /* If the first-seen instance of a particular uniform did not
1092 * have an initializer but a later instance does, copy the
1093 * initializer to the version stored in the symbol table.
1095 /* FINISHME: This is wrong. The constant_value field should
1096 * FINISHME: not be modified! Imagine a case where a shader
1097 * FINISHME: without an initializer is linked in two different
1098 * FINISHME: programs with shaders that have differing
1099 * FINISHME: initializers. Linking with the first will
1100 * FINISHME: modify the shader, and linking with the second
1101 * FINISHME: will fail.
1103 existing
->constant_initializer
=
1104 var
->constant_initializer
->clone(ralloc_parent(existing
),
1109 if (var
->data
.has_initializer
) {
1110 if (existing
->data
.has_initializer
1111 && (var
->constant_initializer
== NULL
1112 || existing
->constant_initializer
== NULL
)) {
1114 "shared global variable `%s' has multiple "
1115 "non-constant initializers.\n",
1120 /* Some instance had an initializer, so keep track of that. In
1121 * this location, all sorts of initializers (constant or
1122 * otherwise) will propagate the existence to the variable
1123 * stored in the symbol table.
1125 existing
->data
.has_initializer
= true;
1128 if (existing
->data
.invariant
!= var
->data
.invariant
) {
1129 linker_error(prog
, "declarations for %s `%s' have "
1130 "mismatching invariant qualifiers\n",
1131 mode_string(var
), var
->name
);
1134 if (existing
->data
.centroid
!= var
->data
.centroid
) {
1135 linker_error(prog
, "declarations for %s `%s' have "
1136 "mismatching centroid qualifiers\n",
1137 mode_string(var
), var
->name
);
1140 if (existing
->data
.sample
!= var
->data
.sample
) {
1141 linker_error(prog
, "declarations for %s `%s` have "
1142 "mismatching sample qualifiers\n",
1143 mode_string(var
), var
->name
);
1146 if (existing
->data
.image_format
!= var
->data
.image_format
) {
1147 linker_error(prog
, "declarations for %s `%s` have "
1148 "mismatching image format qualifiers\n",
1149 mode_string(var
), var
->name
);
1153 variables
.add_variable(var
);
1160 * Perform validation of uniforms used across multiple shader stages
1163 cross_validate_uniforms(struct gl_shader_program
*prog
)
1165 cross_validate_globals(prog
, prog
->_LinkedShaders
,
1166 MESA_SHADER_STAGES
, true);
/**
 * Accumulates the array of prog->BufferInterfaceBlocks and checks that all
 * definitions of blocks agree on their contents.
 *
 * For every stage, a stage-index table (prog->InterfaceBlockStageIndex[i])
 * is allocated mapping program-level block index -> the block's index within
 * that stage (-1 when the stage does not use the block).
 *
 * \return false (after raising a linker error) when two stages define the
 *         same block inconsistently, true otherwise.
 */
static bool
interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
{
   /* Upper bound on distinct blocks: sum of the per-stage counts. */
   unsigned max_num_uniform_blocks = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i])
         max_num_uniform_blocks += prog->_LinkedShaders[i]->NumBufferInterfaceBlocks;
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_shader *sh = prog->_LinkedShaders[i];

      /* Allocate and initialize the per-stage index table even for missing
       * stages, so later queries can index it unconditionally.
       */
      prog->InterfaceBlockStageIndex[i] = ralloc_array(prog, int,
                                                       max_num_uniform_blocks);
      for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
         prog->InterfaceBlockStageIndex[i][j] = -1;

      if (sh == NULL)
         continue;

      for (unsigned int j = 0; j < sh->NumBufferInterfaceBlocks; j++) {
         /* Merge this stage's block into the program-level list; returns the
          * program-level index, or -1 when the definitions conflict.
          */
         int index = link_cross_validate_uniform_block(prog,
                                                       &prog->BufferInterfaceBlocks,
                                                       &prog->NumBufferInterfaceBlocks,
                                                       &sh->BufferInterfaceBlocks[j]);

         if (index == -1) {
            linker_error(prog, "uniform block `%s' has mismatching definitions\n",
                         sh->BufferInterfaceBlocks[j].Name);
            return false;
         }

         prog->InterfaceBlockStageIndex[i][index] = j;
      }
   }

   return true;
}
1214 * Populates a shaders symbol table with all global declarations
1217 populate_symbol_table(gl_shader
*sh
)
1219 sh
->symbols
= new(sh
) glsl_symbol_table
;
1221 foreach_in_list(ir_instruction
, inst
, sh
->ir
) {
1225 if ((func
= inst
->as_function()) != NULL
) {
1226 sh
->symbols
->add_function(func
);
1227 } else if ((var
= inst
->as_variable()) != NULL
) {
1228 if (var
->data
.mode
!= ir_var_temporary
)
1229 sh
->symbols
->add_variable(var
);
/**
 * Remap variables referenced in an instruction tree
 *
 * This is used when instruction trees are cloned from one shader and placed in
 * another.  These trees will contain references to \c ir_variable nodes that
 * do not exist in the target shader.  This function finds these \c ir_variable
 * references and replaces the references with matching variables in the target
 * shader.
 *
 * If there is no matching variable in the target shader, a clone of the
 * \c ir_variable is made and added to the target shader.  The new variable is
 * added to \b both the instruction stream and the symbol table.
 *
 * \param inst    IR tree that is to be processed.
 * \param target  Shader whose symbol table / instruction stream receive any
 *                newly cloned variables.
 * \param temps   Map from source-shader temporaries to their already-made
 *                clones in the target shader.
 */
void
remap_variables(ir_instruction *inst, struct gl_shader *target,
                hash_table *temps)
{
   class remap_visitor : public ir_hierarchical_visitor {
   public:
      remap_visitor(struct gl_shader *target, hash_table *temps)
      {
         this->target = target;
         this->symbols = target->symbols;
         this->instructions = target->ir;
         this->temps = temps;
      }

      virtual ir_visitor_status visit(ir_dereference_variable *ir)
      {
         if (ir->var->data.mode == ir_var_temporary) {
            /* Temporaries must already have a clone recorded by the caller
             * (move_non_declarations inserts them into \c temps).
             */
            ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);

            assert(var != NULL);
            ir->var = var;
            return visit_continue;
         }

         /* Non-temporary: reuse the target's global of the same name, or
          * clone the variable into the target (symbol table AND stream).
          */
         ir_variable *const existing =
            this->symbols->get_variable(ir->var->name);
         if (existing != NULL)
            ir->var = existing;
         else {
            ir_variable *copy = ir->var->clone(this->target, NULL);

            this->symbols->add_variable(copy);
            this->instructions->push_head(copy);
            ir->var = copy;
         }

         return visit_continue;
      }

   private:
      struct gl_shader *target;
      glsl_symbol_table *symbols;
      exec_list *instructions;
      hash_table *temps;
   };

   remap_visitor v(target, temps);

   inst->accept(&v);
}
/**
 * Move non-declarations from one instruction stream to another
 *
 * The intended usage pattern of this function is to pass the pointer to the
 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
 * pointer) for \c last and \c false for \c make_copies on the first
 * call.  Successive calls pass the return value of the previous call for
 * \c last and \c true for \c make_copies.
 *
 * \param instructions Source instruction stream
 * \param last         Instruction after which new instructions should be
 *                     inserted in the target instruction stream
 * \param make_copies  Flag selecting whether instructions in \c instructions
 *                     should be copied (via \c ir_instruction::clone) into the
 *                     target list or moved.
 * \param target       Shader receiving the instructions.
 *
 * \return
 * The new "last" instruction in the target instruction stream.  This pointer
 * is suitable for use as the \c last parameter of a later call to this
 * function.
 */
exec_node *
move_non_declarations(exec_list *instructions, exec_node *last,
                      bool make_copies, gl_shader *target)
{
   hash_table *temps = NULL;

   /* Only needed when cloning: maps source temporaries to their clones so
    * remap_variables() can patch references.
    */
   if (make_copies)
      temps = hash_table_ctor(0, hash_table_pointer_hash,
                              hash_table_pointer_compare);

   foreach_in_list_safe(ir_instruction, inst, instructions) {
      /* Function declarations stay where they are. */
      if (inst->as_function())
         continue;

      /* Non-temporary variable declarations also stay. */
      ir_variable *var = inst->as_variable();
      if ((var != NULL) && (var->data.mode != ir_var_temporary))
         continue;

      /* Everything left should be executable code produced by global
       * initializers (or a compiler temporary backing one).
       */
      assert(inst->as_assignment()
             || inst->as_call()
             || inst->as_if() /* for initializers with the ?: operator */
             || ((var != NULL) && (var->data.mode == ir_var_temporary)));

      if (make_copies) {
         inst = inst->clone(target, NULL);

         if (var != NULL)
            hash_table_insert(temps, inst, var);
         else
            remap_variables(inst, target, temps);
      } else {
         inst->remove();
      }

      last->insert_after(inst);
      last = inst;
   }

   if (make_copies)
      hash_table_dtor(temps);

   return last;
}
/**
 * This class is only used in link_intrastage_shaders() below but declaring
 * it inside that function leads to compiler warnings with some versions of
 * gcc.
 *
 * It walks all ir_variable declarations and gives implicitly sized arrays a
 * concrete size derived from max_array_access, handling interface blocks
 * (named, arrayed, and unnamed) specially.
 */
class array_sizing_visitor : public ir_hierarchical_visitor {
public:
   array_sizing_visitor()
      : mem_ctx(ralloc_context(NULL)),
        unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
                                           hash_table_pointer_compare))
   {
   }

   ~array_sizing_visitor()
   {
      hash_table_dtor(this->unnamed_interfaces);
      ralloc_free(this->mem_ctx);
   }

   virtual ir_visitor_status visit(ir_variable *var)
   {
      const glsl_type *type_without_array;
      /* Size the variable itself first (SSBO trailing unsized arrays are
       * deliberately left unsized).
       */
      fixup_type(&var->type, var->data.max_array_access,
                 var->data.from_ssbo_unsized_array);
      type_without_array = var->type->without_array();
      if (var->type->is_interface()) {
         /* Named interface block instance. */
         if (interface_contains_unsized_arrays(var->type)) {
            const glsl_type *new_type =
               resize_interface_members(var->type,
                                        var->get_max_ifc_array_access(),
                                        var->is_in_shader_storage_block());
            var->type = new_type;
            var->change_interface_type(new_type);
         }
      } else if (type_without_array->is_interface()) {
         /* Array (possibly multi-dimensional) of interface block instances:
          * rebuild the array type around the resized interface.
          */
         if (interface_contains_unsized_arrays(type_without_array)) {
            const glsl_type *new_type =
               resize_interface_members(type_without_array,
                                        var->get_max_ifc_array_access(),
                                        var->is_in_shader_storage_block());
            var->change_interface_type(new_type);
            var->type = update_interface_members_array(var->type, new_type);
         }
      } else if (const glsl_type *ifc_type = var->get_interface_type()) {
         /* Store a pointer to the variable in the unnamed_interfaces
          * hashtable; all members of one unnamed block must be fixed up
          * together once the whole IR has been visited.
          */
         ir_variable **interface_vars = (ir_variable **)
            hash_table_find(this->unnamed_interfaces, ifc_type);
         if (interface_vars == NULL) {
            interface_vars = rzalloc_array(mem_ctx, ir_variable *,
                                           ifc_type->length);
            hash_table_insert(this->unnamed_interfaces, interface_vars,
                              ifc_type);
         }
         unsigned index = ifc_type->field_index(var->name);
         assert(index < ifc_type->length);
         assert(interface_vars[index] == NULL);
         interface_vars[index] = var;
      }
      return visit_continue;
   }

   /**
    * For each unnamed interface block that was discovered while running the
    * visitor, adjust the interface type to reflect the newly assigned array
    * sizes, and fix up the ir_variable nodes to point to the new interface
    * type.
    */
   void fixup_unnamed_interface_types()
   {
      hash_table_call_foreach(this->unnamed_interfaces,
                              fixup_unnamed_interface_type, NULL);
   }

private:
   /**
    * If the type pointed to by \c type represents an unsized array, replace
    * it with a sized array whose size is determined by max_array_access.
    */
   static void fixup_type(const glsl_type **type, unsigned max_array_access,
                          bool from_ssbo_unsized_array)
   {
      if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
         *type = glsl_type::get_array_instance((*type)->fields.array,
                                               max_array_access + 1);
         assert(*type != NULL);
      }
   }

   /**
    * Rebuild an array-of-interface type (recursing through nested arrays)
    * so its innermost element is \c new_interface_type.
    */
   static const glsl_type *
   update_interface_members_array(const glsl_type *type,
                                  const glsl_type *new_interface_type)
   {
      const glsl_type *element_type = type->fields.array;
      if (element_type->is_array()) {
         const glsl_type *new_array_type =
            update_interface_members_array(element_type, new_interface_type);
         return glsl_type::get_array_instance(new_array_type, type->length);
      } else {
         return glsl_type::get_array_instance(new_interface_type,
                                              type->length);
      }
   }

   /**
    * Determine whether the given interface type contains unsized arrays (if
    * it doesn't, array_sizing_visitor doesn't need to process it).
    */
   static bool interface_contains_unsized_arrays(const glsl_type *type)
   {
      for (unsigned i = 0; i < type->length; i++) {
         const glsl_type *elem_type = type->fields.structure[i].type;
         if (elem_type->is_unsized_array())
            return true;
      }
      return false;
   }

   /**
    * Create a new interface type based on the given type, with unsized arrays
    * replaced by sized arrays whose size is determined by
    * max_ifc_array_access.
    */
   static const glsl_type *
   resize_interface_members(const glsl_type *type,
                            const unsigned *max_ifc_array_access,
                            bool is_ssbo)
   {
      unsigned num_fields = type->length;
      glsl_struct_field *fields = new glsl_struct_field[num_fields];
      memcpy(fields, type->fields.structure,
             num_fields * sizeof(*fields));
      for (unsigned i = 0; i < num_fields; i++) {
         /* If SSBO last member is unsized array, we don't replace it by a
          * sized array.
          */
         if (is_ssbo && i == (num_fields - 1))
            fixup_type(&fields[i].type, max_ifc_array_access[i],
                       true);
         else
            fixup_type(&fields[i].type, max_ifc_array_access[i],
                       false);
      }
      glsl_interface_packing packing =
         (glsl_interface_packing) type->interface_packing;
      const glsl_type *new_ifc_type =
         glsl_type::get_interface_instance(fields, num_fields,
                                           packing, type->name);
      delete [] fields;
      return new_ifc_type;
   }

   /**
    * hash_table_call_foreach() callback: rebuild one unnamed interface type
    * from the (already individually resized) member variables, then point
    * every member variable at the new interface type.
    */
   static void fixup_unnamed_interface_type(const void *key, void *data,
                                            void *)
   {
      const glsl_type *ifc_type = (const glsl_type *) key;
      ir_variable **interface_vars = (ir_variable **) data;
      unsigned num_fields = ifc_type->length;
      glsl_struct_field *fields = new glsl_struct_field[num_fields];
      memcpy(fields, ifc_type->fields.structure,
             num_fields * sizeof(*fields));
      bool interface_type_changed = false;
      for (unsigned i = 0; i < num_fields; i++) {
         if (interface_vars[i] != NULL &&
             fields[i].type != interface_vars[i]->type) {
            fields[i].type = interface_vars[i]->type;
            interface_type_changed = true;
         }
      }
      if (!interface_type_changed) {
         delete [] fields;
         return;
      }
      glsl_interface_packing packing =
         (glsl_interface_packing) ifc_type->interface_packing;
      const glsl_type *new_ifc_type =
         glsl_type::get_interface_instance(fields, num_fields, packing,
                                           ifc_type->name);
      delete [] fields;
      for (unsigned i = 0; i < num_fields; i++) {
         if (interface_vars[i] != NULL)
            interface_vars[i]->change_interface_type(new_ifc_type);
      }
   }

   /**
    * Memory context used to allocate the data in \c unnamed_interfaces.
    */
   void *mem_ctx;

   /**
    * Hash table from const glsl_type * to an array of ir_variable *'s
    * pointing to the ir_variables constituting each unnamed interface block.
    */
   hash_table *unnamed_interfaces;
};
/**
 * Performs the cross-validation of tessellation control shader vertices and
 * layout qualifiers for the attached tessellation control shaders,
 * and propagates them to the linked TCS and linked shader program.
 *
 * No-op for any stage other than MESA_SHADER_TESS_CTRL.  Raises a linker
 * error when two shaders declare conflicting counts, or when no shader
 * declares one at all.
 */
static void
link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
                               struct gl_shader *linked_shader,
                               struct gl_shader **shader_list,
                               unsigned num_shaders)
{
   linked_shader->TessCtrl.VerticesOut = 0;

   if (linked_shader->Stage != MESA_SHADER_TESS_CTRL)
      return;

   /* From the GLSL 4.0 spec (chapter 4.3.8.2):
    *
    *     "All tessellation control shader layout declarations in a program
    *      must specify the same output patch vertex count.  There must be at
    *      least one layout qualifier specifying an output patch vertex count
    *      in any program containing tessellation control shaders; however,
    *      such a declaration is not required in all tessellation control
    *      shaders."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      if (shader->TessCtrl.VerticesOut != 0) {
         if (linked_shader->TessCtrl.VerticesOut != 0 &&
             linked_shader->TessCtrl.VerticesOut != shader->TessCtrl.VerticesOut) {
            linker_error(prog, "tessellation control shader defined with "
                         "conflicting output vertex count (%d and %d)\n",
                         linked_shader->TessCtrl.VerticesOut,
                         shader->TessCtrl.VerticesOut);
            return;
         }
         linked_shader->TessCtrl.VerticesOut = shader->TessCtrl.VerticesOut;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->TessCtrl.VerticesOut == 0) {
      linker_error(prog, "tessellation control shader didn't declare "
                   "vertices out layout qualifier\n");
      return;
   }
   prog->TessCtrl.VerticesOut = linked_shader->TessCtrl.VerticesOut;
}
/**
 * Performs the cross-validation of tessellation evaluation shader
 * primitive type, vertex spacing, ordering and point_mode layout qualifiers
 * for the attached tessellation evaluation shaders, and propagates them
 * to the linked TES and linked shader program.
 *
 * Sentinel values mark "not declared": PRIM_UNKNOWN for PrimitiveMode,
 * 0 for Spacing/VertexOrder, -1 for PointMode.  Missing optional qualifiers
 * get their spec-mandated defaults at the end.
 */
static void
link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
                              struct gl_shader *linked_shader,
                              struct gl_shader **shader_list,
                              unsigned num_shaders)
{
   linked_shader->TessEval.PrimitiveMode = PRIM_UNKNOWN;
   linked_shader->TessEval.Spacing = 0;
   linked_shader->TessEval.VertexOrder = 0;
   linked_shader->TessEval.PointMode = -1;

   if (linked_shader->Stage != MESA_SHADER_TESS_EVAL)
      return;

   /* From the GLSL 4.0 spec (chapter 4.3.8.1):
    *
    *     "At least one tessellation evaluation shader (compilation unit) in
    *      a program must declare a primitive mode in its input layout.
    *      Declaration vertex spacing, ordering, and point mode identifiers is
    *      optional.  It is not required that all tessellation evaluation
    *      shaders in a program declare a primitive mode.  If spacing or
    *      vertex ordering declarations are omitted, the tessellation
    *      primitive generator will use equal spacing or counter-clockwise
    *      vertex ordering, respectively.  If a point mode declaration is
    *      omitted, the tessellation primitive generator will produce lines or
    *      triangles according to the primitive mode."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      if (shader->TessEval.PrimitiveMode != PRIM_UNKNOWN) {
         if (linked_shader->TessEval.PrimitiveMode != PRIM_UNKNOWN &&
             linked_shader->TessEval.PrimitiveMode != shader->TessEval.PrimitiveMode) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting input primitive modes.\n");
            return;
         }
         linked_shader->TessEval.PrimitiveMode = shader->TessEval.PrimitiveMode;
      }

      if (shader->TessEval.Spacing != 0) {
         if (linked_shader->TessEval.Spacing != 0 &&
             linked_shader->TessEval.Spacing != shader->TessEval.Spacing) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting vertex spacing.\n");
            return;
         }
         linked_shader->TessEval.Spacing = shader->TessEval.Spacing;
      }

      if (shader->TessEval.VertexOrder != 0) {
         if (linked_shader->TessEval.VertexOrder != 0 &&
             linked_shader->TessEval.VertexOrder != shader->TessEval.VertexOrder) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting ordering.\n");
            return;
         }
         linked_shader->TessEval.VertexOrder = shader->TessEval.VertexOrder;
      }

      if (shader->TessEval.PointMode != -1) {
         if (linked_shader->TessEval.PointMode != -1 &&
             linked_shader->TessEval.PointMode != shader->TessEval.PointMode) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting point modes.\n");
            return;
         }
         linked_shader->TessEval.PointMode = shader->TessEval.PointMode;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->TessEval.PrimitiveMode == PRIM_UNKNOWN) {
      linker_error(prog,
                   "tessellation evaluation shader didn't declare input "
                   "primitive modes.\n");
      return;
   }
   prog->TessEval.PrimitiveMode = linked_shader->TessEval.PrimitiveMode;

   /* Apply spec defaults for omitted optional qualifiers, then propagate. */
   if (linked_shader->TessEval.Spacing == 0)
      linked_shader->TessEval.Spacing = GL_EQUAL;
   prog->TessEval.Spacing = linked_shader->TessEval.Spacing;

   if (linked_shader->TessEval.VertexOrder == 0)
      linked_shader->TessEval.VertexOrder = GL_CCW;
   prog->TessEval.VertexOrder = linked_shader->TessEval.VertexOrder;

   if (linked_shader->TessEval.PointMode == -1)
      linked_shader->TessEval.PointMode = GL_FALSE;
   prog->TessEval.PointMode = linked_shader->TessEval.PointMode;
}
/**
 * Performs the cross-validation of layout qualifiers specified in
 * redeclaration of gl_FragCoord for the attached fragment shaders,
 * and propagates them to the linked FS and linked shader program.
 *
 * Also ORs together the EarlyFragmentTests flags of all shaders.
 * No-op unless the stage is MESA_SHADER_FRAGMENT and the qualifiers are
 * expressible (GLSL >= 150 or ARB_fragment_coord_conventions enabled).
 */
static void
link_fs_input_layout_qualifiers(struct gl_shader_program *prog,
                                struct gl_shader *linked_shader,
                                struct gl_shader **shader_list,
                                unsigned num_shaders)
{
   linked_shader->redeclares_gl_fragcoord = false;
   linked_shader->uses_gl_fragcoord = false;
   linked_shader->origin_upper_left = false;
   linked_shader->pixel_center_integer = false;

   if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
       (prog->Version < 150 && !prog->ARB_fragment_coord_conventions_enable))
      return;

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];
      /* From the GLSL 1.50 spec, page 39:
       *
       *   "If gl_FragCoord is redeclared in any fragment shader in a program,
       *    it must be redeclared in all the fragment shaders in that program
       *    that have a static use gl_FragCoord."
       */
      if ((linked_shader->redeclares_gl_fragcoord
           && !shader->redeclares_gl_fragcoord
           && shader->uses_gl_fragcoord)
          || (shader->redeclares_gl_fragcoord
              && !linked_shader->redeclares_gl_fragcoord
              && linked_shader->uses_gl_fragcoord)) {
         linker_error(prog, "fragment shader defined with conflicting "
                      "layout qualifiers for gl_FragCoord\n");
      }

      /* From the GLSL 1.50 spec, page 39:
       *
       *   "All redeclarations of gl_FragCoord in all fragment shaders in a
       *    single program must have the same set of qualifiers."
       */
      if (linked_shader->redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord
          && (shader->origin_upper_left != linked_shader->origin_upper_left
              || shader->pixel_center_integer != linked_shader->pixel_center_integer)) {
         linker_error(prog, "fragment shader defined with conflicting "
                      "layout qualifiers for gl_FragCoord\n");
      }

      /* Update the linked shader state.  Note that uses_gl_fragcoord should
       * accumulate the results.  The other values should replace.  If there
       * are multiple redeclarations, all the fields except uses_gl_fragcoord
       * are already known to be the same.
       */
      if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
         linked_shader->redeclares_gl_fragcoord =
            shader->redeclares_gl_fragcoord;
         linked_shader->uses_gl_fragcoord = linked_shader->uses_gl_fragcoord
            || shader->uses_gl_fragcoord;
         linked_shader->origin_upper_left = shader->origin_upper_left;
         linked_shader->pixel_center_integer = shader->pixel_center_integer;
      }

      linked_shader->EarlyFragmentTests |= shader->EarlyFragmentTests;
   }
}
/**
 * Performs the cross-validation of geometry shader max_vertices and
 * primitive type layout qualifiers for the attached geometry shaders,
 * and propagates them to the linked GS and linked shader program.
 *
 * Sentinels: PRIM_UNKNOWN marks an undeclared primitive type; 0 marks an
 * undeclared vertex/invocation count.  Invocations defaults to 1 when
 * omitted; the other three are required by the spec.
 */
static void
link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
                                struct gl_shader *linked_shader,
                                struct gl_shader **shader_list,
                                unsigned num_shaders)
{
   linked_shader->Geom.VerticesOut = 0;
   linked_shader->Geom.Invocations = 0;
   linked_shader->Geom.InputType = PRIM_UNKNOWN;
   linked_shader->Geom.OutputType = PRIM_UNKNOWN;

   /* No in/out qualifiers defined for anything but GLSL 1.50+
    * geometry shaders so far.
    */
   if (linked_shader->Stage != MESA_SHADER_GEOMETRY || prog->Version < 150)
      return;

   /* From the GLSL 1.50 spec, page 46:
    *
    *     "All geometry shader output layout declarations in a program
    *      must declare the same layout and same value for
    *      max_vertices.  There must be at least one geometry output
    *      layout declaration somewhere in a program, but not all
    *      geometry shaders (compilation units) are required to
    *      declare it."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      if (shader->Geom.InputType != PRIM_UNKNOWN) {
         if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
             linked_shader->Geom.InputType != shader->Geom.InputType) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "input types\n");
            return;
         }
         linked_shader->Geom.InputType = shader->Geom.InputType;
      }

      if (shader->Geom.OutputType != PRIM_UNKNOWN) {
         if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
             linked_shader->Geom.OutputType != shader->Geom.OutputType) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "output types\n");
            return;
         }
         linked_shader->Geom.OutputType = shader->Geom.OutputType;
      }

      if (shader->Geom.VerticesOut != 0) {
         if (linked_shader->Geom.VerticesOut != 0 &&
             linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "output vertex count (%d and %d)\n",
                         linked_shader->Geom.VerticesOut,
                         shader->Geom.VerticesOut);
            return;
         }
         linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
      }

      if (shader->Geom.Invocations != 0) {
         if (linked_shader->Geom.Invocations != 0 &&
             linked_shader->Geom.Invocations != shader->Geom.Invocations) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "invocation count (%d and %d)\n",
                         linked_shader->Geom.Invocations,
                         shader->Geom.Invocations);
            return;
         }
         linked_shader->Geom.Invocations = shader->Geom.Invocations;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
      linker_error(prog,
                   "geometry shader didn't declare primitive input type\n");
      return;
   }
   prog->Geom.InputType = linked_shader->Geom.InputType;

   if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
      linker_error(prog,
                   "geometry shader didn't declare primitive output type\n");
      return;
   }
   prog->Geom.OutputType = linked_shader->Geom.OutputType;

   if (linked_shader->Geom.VerticesOut == 0) {
      linker_error(prog,
                   "geometry shader didn't declare max_vertices\n");
      return;
   }
   prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;

   if (linked_shader->Geom.Invocations == 0)
      linked_shader->Geom.Invocations = 1;

   prog->Geom.Invocations = linked_shader->Geom.Invocations;
}
/**
 * Perform cross-validation of compute shader local_size_{x,y,z} layout
 * qualifiers for the attached compute shaders, and propagate them to the
 * linked CS and linked shader program.
 *
 * A LocalSize[0] of 0 marks "no local size declared" — the front end
 * guarantees that either all three components are set or none are.
 */
static void
link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
                                struct gl_shader *linked_shader,
                                struct gl_shader **shader_list,
                                unsigned num_shaders)
{
   for (int i = 0; i < 3; i++)
      linked_shader->Comp.LocalSize[i] = 0;

   /* This function is called for all shader stages, but it only has an effect
    * for compute shaders.
    */
   if (linked_shader->Stage != MESA_SHADER_COMPUTE)
      return;

   /* From the ARB_compute_shader spec, in the section describing local size
    * declarations:
    *
    *     If multiple compute shaders attached to a single program object
    *     declare local work-group size, the declarations must be identical;
    *     otherwise a link-time error results. Furthermore, if a program
    *     object contains any compute shaders, at least one must contain an
    *     input layout qualifier specifying the local work sizes of the
    *     program, or a link-time error will occur.
    */
   for (unsigned sh = 0; sh < num_shaders; sh++) {
      struct gl_shader *shader = shader_list[sh];

      if (shader->Comp.LocalSize[0] != 0) {
         if (linked_shader->Comp.LocalSize[0] != 0) {
            for (int i = 0; i < 3; i++) {
               if (linked_shader->Comp.LocalSize[i] !=
                   shader->Comp.LocalSize[i]) {
                  linker_error(prog, "compute shader defined with conflicting "
                               "local sizes\n");
                  return;
               }
            }
         }
         for (int i = 0; i < 3; i++)
            linked_shader->Comp.LocalSize[i] = shader->Comp.LocalSize[i];
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->Comp.LocalSize[0] == 0) {
      linker_error(prog, "compute shader didn't declare local size\n");
      return;
   }
   for (int i = 0; i < 3; i++)
      prog->Comp.LocalSize[i] = linked_shader->Comp.LocalSize[i];
}
/**
 * Combine a group of shaders for a single stage to generate a linked shader
 *
 * \note
 * If this function is supplied a single shader, it is cloned, and the new
 * shader is returned.
 *
 * Pipeline: cross-validate globals and interface blocks, link uniform
 * blocks, reject duplicate function definitions, clone the shader that
 * defines main, pull every other shader's code into it, resolve calls
 * (optionally against the built-in function shader), then run per-stage
 * fix-ups (GS input array sizing, vertex-ID lowering, TCS barrier checks,
 * implicit array sizing).
 *
 * \return the newly linked gl_shader, or NULL on any linker error.
 */
static struct gl_shader *
link_intrastage_shaders(void *mem_ctx,
                        struct gl_context *ctx,
                        struct gl_shader_program *prog,
                        struct gl_shader **shader_list,
                        unsigned num_shaders)
{
   struct gl_uniform_block *uniform_blocks = NULL;

   /* Check that global variables defined in multiple shaders are consistent.
    */
   cross_validate_globals(prog, shader_list, num_shaders, false);
   if (!prog->LinkStatus)
      return NULL;

   /* Check that interface blocks defined in multiple shaders are consistent.
    */
   validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
                                        num_shaders);
   if (!prog->LinkStatus)
      return NULL;

   /* Link up uniform blocks defined within this stage. */
   const unsigned num_uniform_blocks =
      link_uniform_blocks(mem_ctx, ctx, prog, shader_list, num_shaders,
                          &uniform_blocks);
   if (!prog->LinkStatus)
      return NULL;

   /* Check that there is only a single definition of each function signature
    * across all shaders.
    */
   for (unsigned i = 0; i < (num_shaders - 1); i++) {
      foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
         ir_function *const f = node->as_function();

         if (f == NULL)
            continue;

         for (unsigned j = i + 1; j < num_shaders; j++) {
            ir_function *const other =
               shader_list[j]->symbols->get_function(f->name);

            /* If the other shader has no function (and therefore no function
             * signatures) with the same name, skip to the next shader.
             */
            if (other == NULL)
               continue;

            foreach_in_list(ir_function_signature, sig, &f->signatures) {
               if (!sig->is_defined || sig->is_builtin())
                  continue;

               ir_function_signature *other_sig =
                  other->exact_matching_signature(NULL, &sig->parameters);

               if ((other_sig != NULL) && other_sig->is_defined
                   && !other_sig->is_builtin()) {
                  linker_error(prog, "function `%s' is multiply defined\n",
                               f->name);
                  return NULL;
               }
            }
         }
      }
   }

   /* Find the shader that defines main, and make a clone of it.
    *
    * Starting with the clone, search for undefined references.  If one is
    * found, find the shader that defines it.  Clone the reference and add
    * it to the shader.  Repeat until there are no undefined references or
    * until a reference cannot be resolved.
    */
   gl_shader *main = NULL;
   for (unsigned i = 0; i < num_shaders; i++) {
      if (_mesa_get_main_function_signature(shader_list[i]) != NULL) {
         main = shader_list[i];
         break;
      }
   }

   if (main == NULL) {
      linker_error(prog, "%s shader lacks `main'\n",
                   _mesa_shader_stage_to_string(shader_list[0]->Stage));
      return NULL;
   }

   gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
   linked->ir = new(linked) exec_list;
   clone_ir_list(mem_ctx, linked->ir, main->ir);

   /* The linked shader takes ownership of the uniform-block array. */
   linked->BufferInterfaceBlocks = uniform_blocks;
   linked->NumBufferInterfaceBlocks = num_uniform_blocks;
   ralloc_steal(linked, linked->BufferInterfaceBlocks);

   link_fs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_tcs_out_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_tes_in_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_cs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);

   populate_symbol_table(linked);

   /* The pointer to the main function in the final linked shader (i.e., the
    * copy of the original shader that contained the main function).
    */
   ir_function_signature *const main_sig =
      _mesa_get_main_function_signature(linked);

   /* Move any instructions other than variable declarations or function
    * declarations into main.
    */
   exec_node *insertion_point =
      move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
                            linked);

   for (unsigned i = 0; i < num_shaders; i++) {
      if (shader_list[i] == main)
         continue;

      insertion_point = move_non_declarations(shader_list[i]->ir,
                                              insertion_point, true, linked);
   }

   /* Check if any shader needs built-in functions. */
   bool need_builtins = false;
   for (unsigned i = 0; i < num_shaders; i++) {
      if (shader_list[i]->uses_builtin_functions) {
         need_builtins = true;
         break;
      }
   }

   bool ok;
   if (need_builtins) {
      /* Make a temporary array one larger than shader_list, which will hold
       * the built-in function shader as well.
       */
      gl_shader **linking_shaders = (gl_shader **)
         calloc(num_shaders + 1, sizeof(gl_shader *));

      ok = linking_shaders != NULL;

      if (ok) {
         memcpy(linking_shaders, shader_list,
                num_shaders * sizeof(gl_shader *));
         _mesa_glsl_initialize_builtin_functions();
         linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader();

         ok = link_function_calls(prog, linked, linking_shaders,
                                  num_shaders + 1);

         free(linking_shaders);
      } else {
         _mesa_error_no_memory(__func__);
      }
   } else {
      ok = link_function_calls(prog, linked, shader_list, num_shaders);
   }

   if (!ok) {
      /* link_function_calls already reported the linker error. */
      _mesa_delete_shader(ctx, linked);
      return NULL;
   }

   /* At this point linked should contain all of the linked IR, so
    * validate it to make sure nothing went wrong.
    */
   validate_ir_tree(linked->ir);

   /* Set the size of geometry shader input arrays */
   if (linked->Stage == MESA_SHADER_GEOMETRY) {
      unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
      geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
      foreach_in_list(ir_instruction, ir, linked->ir) {
         ir->accept(&input_resize_visitor);
      }
   }

   if (ctx->Const.VertexID_is_zero_based)
      lower_vertex_id(linked);

   /* Validate correct usage of barrier() in the tess control shader */
   if (linked->Stage == MESA_SHADER_TESS_CTRL) {
      barrier_use_visitor visitor(prog);
      foreach_in_list(ir_instruction, ir, linked->ir) {
         ir->accept(&visitor);
      }
   }

   /* Make a pass over all variable declarations to ensure that arrays with
    * unspecified sizes have a size specified.  The size is inferred from the
    * max_array_access field.
    */
   array_sizing_visitor v;
   v.run(linked->ir);
   v.fixup_unnamed_interface_types();

   return linked;
}
2184 * Update the sizes of linked shader uniform arrays to the maximum
2187 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
2189 * If one or more elements of an array are active,
2190 * GetActiveUniform will return the name of the array in name,
2191 * subject to the restrictions listed above. The type of the array
2192 * is returned in type. The size parameter contains the highest
2193 * array element index used, plus one. The compiler or linker
2194 * determines the highest index used. There will be only one
2195 * active uniform reported by the GL per uniform array.
2199 update_array_sizes(struct gl_shader_program
*prog
)
2201 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2202 if (prog
->_LinkedShaders
[i
] == NULL
)
2205 foreach_in_list(ir_instruction
, node
, prog
->_LinkedShaders
[i
]->ir
) {
2206 ir_variable
*const var
= node
->as_variable();
2208 if ((var
== NULL
) || (var
->data
.mode
!= ir_var_uniform
) ||
2209 !var
->type
->is_array())
2212 /* GL_ARB_uniform_buffer_object says that std140 uniforms
2213 * will not be eliminated. Since we always do std140, just
2214 * don't resize arrays in UBOs.
2216 * Atomic counters are supposed to get deterministic
2217 * locations assigned based on the declaration ordering and
2218 * sizes, array compaction would mess that up.
2220 * Subroutine uniforms are not removed.
2222 if (var
->is_in_buffer_block() || var
->type
->contains_atomic() ||
2223 var
->type
->contains_subroutine())
2226 unsigned int size
= var
->data
.max_array_access
;
2227 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
2228 if (prog
->_LinkedShaders
[j
] == NULL
)
2231 foreach_in_list(ir_instruction
, node2
, prog
->_LinkedShaders
[j
]->ir
) {
2232 ir_variable
*other_var
= node2
->as_variable();
2236 if (strcmp(var
->name
, other_var
->name
) == 0 &&
2237 other_var
->data
.max_array_access
> size
) {
2238 size
= other_var
->data
.max_array_access
;
2243 if (size
+ 1 != var
->type
->length
) {
2244 /* If this is a built-in uniform (i.e., it's backed by some
2245 * fixed-function state), adjust the number of state slots to
2246 * match the new array size. The number of slots per array entry
2247 * is not known. It seems safe to assume that the total number of
2248 * slots is an integer multiple of the number of array elements.
2249 * Determine the number of slots per array element by dividing by
2250 * the old (total) size.
2252 const unsigned num_slots
= var
->get_num_state_slots();
2253 if (num_slots
> 0) {
2254 var
->set_num_state_slots((size
+ 1)
2255 * (num_slots
/ var
->type
->length
));
2258 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
2260 /* FINISHME: We should update the types of array
2261 * dereferences of this variable now.
2269 * Resize tessellation evaluation per-vertex inputs to the size of
2270 * tessellation control per-vertex outputs.
2273 resize_tes_inputs(struct gl_context
*ctx
,
2274 struct gl_shader_program
*prog
)
2276 if (prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
] == NULL
)
2279 gl_shader
*const tcs
= prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
2280 gl_shader
*const tes
= prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
2282 /* If no control shader is present, then the TES inputs are statically
2283 * sized to MaxPatchVertices; the actual size of the arrays won't be
2284 * known until draw time.
2286 const int num_vertices
= tcs
2287 ? tcs
->TessCtrl
.VerticesOut
2288 : ctx
->Const
.MaxPatchVertices
;
2290 tess_eval_array_resize_visitor
input_resize_visitor(num_vertices
, prog
);
2291 foreach_in_list(ir_instruction
, ir
, tes
->ir
) {
2292 ir
->accept(&input_resize_visitor
);
2296 /* Convert the gl_PatchVerticesIn system value into a constant, since
2297 * the value is known at this point.
2299 foreach_in_list(ir_instruction
, ir
, tes
->ir
) {
2300 ir_variable
*var
= ir
->as_variable();
2301 if (var
&& var
->data
.mode
== ir_var_system_value
&&
2302 var
->data
.location
== SYSTEM_VALUE_VERTICES_IN
) {
2303 void *mem_ctx
= ralloc_parent(var
);
2304 var
->data
.mode
= ir_var_auto
;
2305 var
->data
.location
= 0;
2306 var
->constant_value
= new(mem_ctx
) ir_constant(num_vertices
);
/**
 * Find a contiguous set of available bits in a bitmask.
 *
 * \param used_mask     Bits representing used (1) and unused (0) locations
 * \param needed_count  Number of contiguous bits needed.
 *
 * \return
 * Base location of the available bits on success or -1 on failure.
 */
static int
find_available_slots(unsigned used_mask, unsigned needed_count)
{
   unsigned needed_mask = (1 << needed_count) - 1;
   const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;

   /* The comparison to 32 is redundant, but without it GCC emits "warning:
    * cannot optimize possibly infinite loops" for the loop below.
    */
   if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
      return -1;

   /* Slide the window of needed bits up from bit 0 until it fits entirely
    * in the unused portion of the mask.
    */
   for (int i = 0; i <= max_bit_to_test; i++) {
      if ((needed_mask & ~used_mask) == needed_mask)
         return i;

      needed_mask <<= 1;
   }

   return -1;
}
2345 * Assign locations for either VS inputs or FS outputs
2347 * \param prog Shader program whose variables need locations assigned
2348 * \param constants Driver specific constant values for the program.
2349 * \param target_index Selector for the program target to receive location
2350 * assignmnets. Must be either \c MESA_SHADER_VERTEX or
2351 * \c MESA_SHADER_FRAGMENT.
2354 * If locations are successfully assigned, true is returned. Otherwise an
2355 * error is emitted to the shader link log and false is returned.
2358 assign_attribute_or_color_locations(gl_shader_program
*prog
,
2359 struct gl_constants
*constants
,
2360 unsigned target_index
)
2362 /* Maximum number of generic locations. This corresponds to either the
2363 * maximum number of draw buffers or the maximum number of generic
2366 unsigned max_index
= (target_index
== MESA_SHADER_VERTEX
) ?
2367 constants
->Program
[target_index
].MaxAttribs
:
2368 MAX2(constants
->MaxDrawBuffers
, constants
->MaxDualSourceDrawBuffers
);
2370 /* Mark invalid locations as being used.
2372 unsigned used_locations
= (max_index
>= 32)
2373 ? ~0 : ~((1 << max_index
) - 1);
2374 unsigned double_storage_locations
= 0;
2376 assert((target_index
== MESA_SHADER_VERTEX
)
2377 || (target_index
== MESA_SHADER_FRAGMENT
));
2379 gl_shader
*const sh
= prog
->_LinkedShaders
[target_index
];
2383 /* Operate in a total of four passes.
2385 * 1. Invalidate the location assignments for all vertex shader inputs.
2387 * 2. Assign locations for inputs that have user-defined (via
2388 * glBindVertexAttribLocation) locations and outputs that have
2389 * user-defined locations (via glBindFragDataLocation).
2391 * 3. Sort the attributes without assigned locations by number of slots
2392 * required in decreasing order. Fragmentation caused by attribute
2393 * locations assigned by the application may prevent large attributes
2394 * from having enough contiguous space.
2396 * 4. Assign locations to any inputs without assigned locations.
2399 const int generic_base
= (target_index
== MESA_SHADER_VERTEX
)
2400 ? (int) VERT_ATTRIB_GENERIC0
: (int) FRAG_RESULT_DATA0
;
2402 const enum ir_variable_mode direction
=
2403 (target_index
== MESA_SHADER_VERTEX
)
2404 ? ir_var_shader_in
: ir_var_shader_out
;
2407 /* Temporary storage for the set of attributes that need locations assigned.
2413 /* Used below in the call to qsort. */
2414 static int compare(const void *a
, const void *b
)
2416 const temp_attr
*const l
= (const temp_attr
*) a
;
2417 const temp_attr
*const r
= (const temp_attr
*) b
;
2419 /* Reversed because we want a descending order sort below. */
2420 return r
->slots
- l
->slots
;
2424 unsigned num_attr
= 0;
2426 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
2427 ir_variable
*const var
= node
->as_variable();
2429 if ((var
== NULL
) || (var
->data
.mode
!= (unsigned) direction
))
2432 if (var
->data
.explicit_location
) {
2433 var
->data
.is_unmatched_generic_inout
= 0;
2434 if ((var
->data
.location
>= (int)(max_index
+ generic_base
))
2435 || (var
->data
.location
< 0)) {
2437 "invalid explicit location %d specified for `%s'\n",
2438 (var
->data
.location
< 0)
2439 ? var
->data
.location
2440 : var
->data
.location
- generic_base
,
2444 } else if (target_index
== MESA_SHADER_VERTEX
) {
2447 if (prog
->AttributeBindings
->get(binding
, var
->name
)) {
2448 assert(binding
>= VERT_ATTRIB_GENERIC0
);
2449 var
->data
.location
= binding
;
2450 var
->data
.is_unmatched_generic_inout
= 0;
2452 } else if (target_index
== MESA_SHADER_FRAGMENT
) {
2456 if (prog
->FragDataBindings
->get(binding
, var
->name
)) {
2457 assert(binding
>= FRAG_RESULT_DATA0
);
2458 var
->data
.location
= binding
;
2459 var
->data
.is_unmatched_generic_inout
= 0;
2461 if (prog
->FragDataIndexBindings
->get(index
, var
->name
)) {
2462 var
->data
.index
= index
;
2467 /* From GL4.5 core spec, section 15.2 (Shader Execution):
2469 * "Output binding assignments will cause LinkProgram to fail:
2471 * If the program has an active output assigned to a location greater
2472 * than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
2473 * an active output assigned an index greater than or equal to one;"
2475 if (target_index
== MESA_SHADER_FRAGMENT
&& var
->data
.index
>= 1 &&
2476 var
->data
.location
- generic_base
>=
2477 (int) constants
->MaxDualSourceDrawBuffers
) {
2479 "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
2480 "with index %u for %s\n",
2481 var
->data
.location
- generic_base
, var
->data
.index
,
2486 const unsigned slots
= var
->type
->count_attribute_slots(target_index
== MESA_SHADER_VERTEX
? true : false);
2488 /* If the variable is not a built-in and has a location statically
2489 * assigned in the shader (presumably via a layout qualifier), make sure
2490 * that it doesn't collide with other assigned locations. Otherwise,
2491 * add it to the list of variables that need linker-assigned locations.
2493 if (var
->data
.location
!= -1) {
2494 if (var
->data
.location
>= generic_base
&& var
->data
.index
< 1) {
2495 /* From page 61 of the OpenGL 4.0 spec:
2497 * "LinkProgram will fail if the attribute bindings assigned
2498 * by BindAttribLocation do not leave not enough space to
2499 * assign a location for an active matrix attribute or an
2500 * active attribute array, both of which require multiple
2501 * contiguous generic attributes."
2503 * I think above text prohibits the aliasing of explicit and
2504 * automatic assignments. But, aliasing is allowed in manual
2505 * assignments of attribute locations. See below comments for
2508 * From OpenGL 4.0 spec, page 61:
2510 * "It is possible for an application to bind more than one
2511 * attribute name to the same location. This is referred to as
2512 * aliasing. This will only work if only one of the aliased
2513 * attributes is active in the executable program, or if no
2514 * path through the shader consumes more than one attribute of
2515 * a set of attributes aliased to the same location. A link
2516 * error can occur if the linker determines that every path
2517 * through the shader consumes multiple aliased attributes,
2518 * but implementations are not required to generate an error
2521 * From GLSL 4.30 spec, page 54:
2523 * "A program will fail to link if any two non-vertex shader
2524 * input variables are assigned to the same location. For
2525 * vertex shaders, multiple input variables may be assigned
2526 * to the same location using either layout qualifiers or via
2527 * the OpenGL API. However, such aliasing is intended only to
2528 * support vertex shaders where each execution path accesses
2529 * at most one input per each location. Implementations are
2530 * permitted, but not required, to generate link-time errors
2531 * if they detect that every path through the vertex shader
2532 * executable accesses multiple inputs assigned to any single
2533 * location. For all shader types, a program will fail to link
2534 * if explicit location assignments leave the linker unable
2535 * to find space for other variables without explicit
2538 * From OpenGL ES 3.0 spec, page 56:
2540 * "Binding more than one attribute name to the same location
2541 * is referred to as aliasing, and is not permitted in OpenGL
2542 * ES Shading Language 3.00 vertex shaders. LinkProgram will
2543 * fail when this condition exists. However, aliasing is
2544 * possible in OpenGL ES Shading Language 1.00 vertex shaders.
2545 * This will only work if only one of the aliased attributes
2546 * is active in the executable program, or if no path through
2547 * the shader consumes more than one attribute of a set of
2548 * attributes aliased to the same location. A link error can
2549 * occur if the linker determines that every path through the
2550 * shader consumes multiple aliased attributes, but implemen-
2551 * tations are not required to generate an error in this case."
2553 * After looking at above references from OpenGL, OpenGL ES and
2554 * GLSL specifications, we allow aliasing of vertex input variables
2555 * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
2557 * NOTE: This is not required by the spec but its worth mentioning
2558 * here that we're not doing anything to make sure that no path
2559 * through the vertex shader executable accesses multiple inputs
2560 * assigned to any single location.
2563 /* Mask representing the contiguous slots that will be used by
2566 const unsigned attr
= var
->data
.location
- generic_base
;
2567 const unsigned use_mask
= (1 << slots
) - 1;
2568 const char *const string
= (target_index
== MESA_SHADER_VERTEX
)
2569 ? "vertex shader input" : "fragment shader output";
2571 /* Generate a link error if the requested locations for this
2572 * attribute exceed the maximum allowed attribute location.
2574 if (attr
+ slots
> max_index
) {
2576 "insufficient contiguous locations "
2577 "available for %s `%s' %d %d %d\n", string
,
2578 var
->name
, used_locations
, use_mask
, attr
);
2582 /* Generate a link error if the set of bits requested for this
2583 * attribute overlaps any previously allocated bits.
2585 if ((~(use_mask
<< attr
) & used_locations
) != used_locations
) {
2586 if (target_index
== MESA_SHADER_FRAGMENT
||
2587 (prog
->IsES
&& prog
->Version
>= 300)) {
2589 "overlapping location is assigned "
2590 "to %s `%s' %d %d %d\n", string
,
2591 var
->name
, used_locations
, use_mask
, attr
);
2594 linker_warning(prog
,
2595 "overlapping location is assigned "
2596 "to %s `%s' %d %d %d\n", string
,
2597 var
->name
, used_locations
, use_mask
, attr
);
2601 used_locations
|= (use_mask
<< attr
);
2603 /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
2605 * "A program with more than the value of MAX_VERTEX_ATTRIBS
2606 * active attribute variables may fail to link, unless
2607 * device-dependent optimizations are able to make the program
2608 * fit within available hardware resources. For the purposes
2609 * of this test, attribute variables of the type dvec3, dvec4,
2610 * dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
2611 * count as consuming twice as many attributes as equivalent
2612 * single-precision types. While these types use the same number
2613 * of generic attributes as their single-precision equivalents,
2614 * implementations are permitted to consume two single-precision
2615 * vectors of internal storage for each three- or four-component
2616 * double-precision vector."
2618 * Mark this attribute slot as taking up twice as much space
2619 * so we can count it properly against limits. According to
2620 * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
2621 * is optional behavior, but it seems preferable.
2623 if (var
->type
->without_array()->is_dual_slot_double())
2624 double_storage_locations
|= (use_mask
<< attr
);
2630 to_assign
[num_attr
].slots
= slots
;
2631 to_assign
[num_attr
].var
= var
;
2635 if (target_index
== MESA_SHADER_VERTEX
) {
2636 unsigned total_attribs_size
=
2637 _mesa_bitcount(used_locations
& ((1 << max_index
) - 1)) +
2638 _mesa_bitcount(double_storage_locations
);
2639 if (total_attribs_size
> max_index
) {
2641 "attempt to use %d vertex attribute slots only %d available ",
2642 total_attribs_size
, max_index
);
2647 /* If all of the attributes were assigned locations by the application (or
2648 * are built-in attributes with fixed locations), return early. This should
2649 * be the common case.
2654 qsort(to_assign
, num_attr
, sizeof(to_assign
[0]), temp_attr::compare
);
2656 if (target_index
== MESA_SHADER_VERTEX
) {
2657 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
2658 * only be explicitly assigned by via glBindAttribLocation. Mark it as
2659 * reserved to prevent it from being automatically allocated below.
2661 find_deref_visitor
find("gl_Vertex");
2663 if (find
.variable_found())
2664 used_locations
|= (1 << 0);
2667 for (unsigned i
= 0; i
< num_attr
; i
++) {
2668 /* Mask representing the contiguous slots that will be used by this
2671 const unsigned use_mask
= (1 << to_assign
[i
].slots
) - 1;
2673 int location
= find_available_slots(used_locations
, to_assign
[i
].slots
);
2676 const char *const string
= (target_index
== MESA_SHADER_VERTEX
)
2677 ? "vertex shader input" : "fragment shader output";
2680 "insufficient contiguous locations "
2681 "available for %s `%s'\n",
2682 string
, to_assign
[i
].var
->name
);
2686 to_assign
[i
].var
->data
.location
= generic_base
+ location
;
2687 to_assign
[i
].var
->data
.is_unmatched_generic_inout
= 0;
2688 used_locations
|= (use_mask
<< location
);
2695 * Match explicit locations of outputs to inputs and deactivate the
2696 * unmatch flag if found so we don't optimise them away.
2699 match_explicit_outputs_to_inputs(struct gl_shader_program
*prog
,
2700 gl_shader
*producer
,
2701 gl_shader
*consumer
)
2703 glsl_symbol_table parameters
;
2704 ir_variable
*explicit_locations
[MAX_VARYING
] = { NULL
};
2706 /* Find all shader outputs in the "producer" stage.
2708 foreach_in_list(ir_instruction
, node
, producer
->ir
) {
2709 ir_variable
*const var
= node
->as_variable();
2711 if ((var
== NULL
) || (var
->data
.mode
!= ir_var_shader_out
))
2714 if (var
->data
.explicit_location
&&
2715 var
->data
.location
>= VARYING_SLOT_VAR0
) {
2716 const unsigned idx
= var
->data
.location
- VARYING_SLOT_VAR0
;
2717 if (explicit_locations
[idx
] == NULL
)
2718 explicit_locations
[idx
] = var
;
2722 /* Match inputs to outputs */
2723 foreach_in_list(ir_instruction
, node
, consumer
->ir
) {
2724 ir_variable
*const input
= node
->as_variable();
2726 if ((input
== NULL
) || (input
->data
.mode
!= ir_var_shader_in
))
2729 ir_variable
*output
= NULL
;
2730 if (input
->data
.explicit_location
2731 && input
->data
.location
>= VARYING_SLOT_VAR0
) {
2732 output
= explicit_locations
[input
->data
.location
- VARYING_SLOT_VAR0
];
2734 if (output
!= NULL
){
2735 input
->data
.is_unmatched_generic_inout
= 0;
2736 output
->data
.is_unmatched_generic_inout
= 0;
2743 * Store the gl_FragDepth layout in the gl_shader_program struct.
2746 store_fragdepth_layout(struct gl_shader_program
*prog
)
2748 if (prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
] == NULL
) {
2752 struct exec_list
*ir
= prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
]->ir
;
2754 /* We don't look up the gl_FragDepth symbol directly because if
2755 * gl_FragDepth is not used in the shader, it's removed from the IR.
2756 * However, the symbol won't be removed from the symbol table.
2758 * We're only interested in the cases where the variable is NOT removed
2761 foreach_in_list(ir_instruction
, node
, ir
) {
2762 ir_variable
*const var
= node
->as_variable();
2764 if (var
== NULL
|| var
->data
.mode
!= ir_var_shader_out
) {
2768 if (strcmp(var
->name
, "gl_FragDepth") == 0) {
2769 switch (var
->data
.depth_layout
) {
2770 case ir_depth_layout_none
:
2771 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_NONE
;
2773 case ir_depth_layout_any
:
2774 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_ANY
;
2776 case ir_depth_layout_greater
:
2777 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_GREATER
;
2779 case ir_depth_layout_less
:
2780 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_LESS
;
2782 case ir_depth_layout_unchanged
:
2783 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_UNCHANGED
;
2794 * Validate the resources used by a program versus the implementation limits
2797 check_resources(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
2799 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2800 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2805 if (sh
->num_samplers
> ctx
->Const
.Program
[i
].MaxTextureImageUnits
) {
2806 linker_error(prog
, "Too many %s shader texture samplers\n",
2807 _mesa_shader_stage_to_string(i
));
2810 if (sh
->num_uniform_components
>
2811 ctx
->Const
.Program
[i
].MaxUniformComponents
) {
2812 if (ctx
->Const
.GLSLSkipStrictMaxUniformLimitCheck
) {
2813 linker_warning(prog
, "Too many %s shader default uniform block "
2814 "components, but the driver will try to optimize "
2815 "them out; this is non-portable out-of-spec "
2817 _mesa_shader_stage_to_string(i
));
2819 linker_error(prog
, "Too many %s shader default uniform block "
2821 _mesa_shader_stage_to_string(i
));
2825 if (sh
->num_combined_uniform_components
>
2826 ctx
->Const
.Program
[i
].MaxCombinedUniformComponents
) {
2827 if (ctx
->Const
.GLSLSkipStrictMaxUniformLimitCheck
) {
2828 linker_warning(prog
, "Too many %s shader uniform components, "
2829 "but the driver will try to optimize them out; "
2830 "this is non-portable out-of-spec behavior\n",
2831 _mesa_shader_stage_to_string(i
));
2833 linker_error(prog
, "Too many %s shader uniform components\n",
2834 _mesa_shader_stage_to_string(i
));
2839 unsigned blocks
[MESA_SHADER_STAGES
] = {0};
2840 unsigned total_uniform_blocks
= 0;
2841 unsigned shader_blocks
[MESA_SHADER_STAGES
] = {0};
2842 unsigned total_shader_storage_blocks
= 0;
2844 for (unsigned i
= 0; i
< prog
->NumBufferInterfaceBlocks
; i
++) {
2845 /* Don't check SSBOs for Uniform Block Size */
2846 if (!prog
->BufferInterfaceBlocks
[i
].IsShaderStorage
&&
2847 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
> ctx
->Const
.MaxUniformBlockSize
) {
2848 linker_error(prog
, "Uniform block %s too big (%d/%d)\n",
2849 prog
->BufferInterfaceBlocks
[i
].Name
,
2850 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
,
2851 ctx
->Const
.MaxUniformBlockSize
);
2854 if (prog
->BufferInterfaceBlocks
[i
].IsShaderStorage
&&
2855 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
> ctx
->Const
.MaxShaderStorageBlockSize
) {
2856 linker_error(prog
, "Shader storage block %s too big (%d/%d)\n",
2857 prog
->BufferInterfaceBlocks
[i
].Name
,
2858 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
,
2859 ctx
->Const
.MaxShaderStorageBlockSize
);
2862 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
2863 if (prog
->InterfaceBlockStageIndex
[j
][i
] != -1) {
2864 struct gl_shader
*sh
= prog
->_LinkedShaders
[j
];
2865 int stage_index
= prog
->InterfaceBlockStageIndex
[j
][i
];
2866 if (sh
&& sh
->BufferInterfaceBlocks
[stage_index
].IsShaderStorage
) {
2868 total_shader_storage_blocks
++;
2871 total_uniform_blocks
++;
2876 if (total_uniform_blocks
> ctx
->Const
.MaxCombinedUniformBlocks
) {
2877 linker_error(prog
, "Too many combined uniform blocks (%d/%d)\n",
2878 total_uniform_blocks
,
2879 ctx
->Const
.MaxCombinedUniformBlocks
);
2881 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2882 const unsigned max_uniform_blocks
=
2883 ctx
->Const
.Program
[i
].MaxUniformBlocks
;
2884 if (blocks
[i
] > max_uniform_blocks
) {
2885 linker_error(prog
, "Too many %s uniform blocks (%d/%d)\n",
2886 _mesa_shader_stage_to_string(i
),
2888 max_uniform_blocks
);
2894 if (total_shader_storage_blocks
> ctx
->Const
.MaxCombinedShaderStorageBlocks
) {
2895 linker_error(prog
, "Too many combined shader storage blocks (%d/%d)\n",
2896 total_shader_storage_blocks
,
2897 ctx
->Const
.MaxCombinedShaderStorageBlocks
);
2899 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2900 const unsigned max_shader_storage_blocks
=
2901 ctx
->Const
.Program
[i
].MaxShaderStorageBlocks
;
2902 if (shader_blocks
[i
] > max_shader_storage_blocks
) {
2903 linker_error(prog
, "Too many %s shader storage blocks (%d/%d)\n",
2904 _mesa_shader_stage_to_string(i
),
2906 max_shader_storage_blocks
);
2915 link_calculate_subroutine_compat(struct gl_shader_program
*prog
)
2917 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2918 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2923 for (unsigned j
= 0; j
< sh
->NumSubroutineUniformRemapTable
; j
++) {
2924 struct gl_uniform_storage
*uni
= sh
->SubroutineUniformRemapTable
[j
];
2930 for (unsigned f
= 0; f
< sh
->NumSubroutineFunctions
; f
++) {
2931 struct gl_subroutine_function
*fn
= &sh
->SubroutineFunctions
[f
];
2932 for (int k
= 0; k
< fn
->num_compat_types
; k
++) {
2933 if (fn
->types
[k
] == uni
->type
) {
2939 uni
->num_compatible_subroutines
= count
;
2945 check_subroutine_resources(struct gl_shader_program
*prog
)
2947 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2948 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2951 if (sh
->NumSubroutineUniformRemapTable
> MAX_SUBROUTINE_UNIFORM_LOCATIONS
)
2952 linker_error(prog
, "Too many %s shader subroutine uniforms\n",
2953 _mesa_shader_stage_to_string(i
));
2958 * Validate shader image resources.
2961 check_image_resources(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
2963 unsigned total_image_units
= 0;
2964 unsigned fragment_outputs
= 0;
2965 unsigned total_shader_storage_blocks
= 0;
2967 if (!ctx
->Extensions
.ARB_shader_image_load_store
)
2970 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2971 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2974 if (sh
->NumImages
> ctx
->Const
.Program
[i
].MaxImageUniforms
)
2975 linker_error(prog
, "Too many %s shader image uniforms (%u > %u)\n",
2976 _mesa_shader_stage_to_string(i
), sh
->NumImages
,
2977 ctx
->Const
.Program
[i
].MaxImageUniforms
);
2979 total_image_units
+= sh
->NumImages
;
2981 for (unsigned j
= 0; j
< prog
->NumBufferInterfaceBlocks
; j
++) {
2982 int stage_index
= prog
->InterfaceBlockStageIndex
[i
][j
];
2983 if (stage_index
!= -1 && sh
->BufferInterfaceBlocks
[stage_index
].IsShaderStorage
)
2984 total_shader_storage_blocks
++;
2987 if (i
== MESA_SHADER_FRAGMENT
) {
2988 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
2989 ir_variable
*var
= node
->as_variable();
2990 if (var
&& var
->data
.mode
== ir_var_shader_out
)
2991 /* since there are no double fs outputs - pass false */
2992 fragment_outputs
+= var
->type
->count_attribute_slots(false);
2998 if (total_image_units
> ctx
->Const
.MaxCombinedImageUniforms
)
2999 linker_error(prog
, "Too many combined image uniforms\n");
3001 if (total_image_units
+ fragment_outputs
+ total_shader_storage_blocks
>
3002 ctx
->Const
.MaxCombinedShaderOutputResources
)
3003 linker_error(prog
, "Too many combined image uniforms, shader storage "
3004 " buffers and fragment outputs\n");
3009 * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3010 * for a variable, checks for overlaps between other uniforms using explicit
3014 reserve_explicit_locations(struct gl_shader_program
*prog
,
3015 string_to_uint_map
*map
, ir_variable
*var
)
3017 unsigned slots
= var
->type
->uniform_locations();
3018 unsigned max_loc
= var
->data
.location
+ slots
- 1;
3020 /* Resize remap table if locations do not fit in the current one. */
3021 if (max_loc
+ 1 > prog
->NumUniformRemapTable
) {
3022 prog
->UniformRemapTable
=
3023 reralloc(prog
, prog
->UniformRemapTable
,
3024 gl_uniform_storage
*,
3027 if (!prog
->UniformRemapTable
) {
3028 linker_error(prog
, "Out of memory during linking.\n");
3032 /* Initialize allocated space. */
3033 for (unsigned i
= prog
->NumUniformRemapTable
; i
< max_loc
+ 1; i
++)
3034 prog
->UniformRemapTable
[i
] = NULL
;
3036 prog
->NumUniformRemapTable
= max_loc
+ 1;
3039 for (unsigned i
= 0; i
< slots
; i
++) {
3040 unsigned loc
= var
->data
.location
+ i
;
3042 /* Check if location is already used. */
3043 if (prog
->UniformRemapTable
[loc
] == INACTIVE_UNIFORM_EXPLICIT_LOCATION
) {
3045 /* Possibly same uniform from a different stage, this is ok. */
3047 if (map
->get(hash_loc
, var
->name
) && hash_loc
== loc
- i
)
3050 /* ARB_explicit_uniform_location specification states:
3052 * "No two default-block uniform variables in the program can have
3053 * the same location, even if they are unused, otherwise a compiler
3054 * or linker error will be generated."
3057 "location qualifier for uniform %s overlaps "
3058 "previously used location\n",
3063 /* Initialize location as inactive before optimization
3064 * rounds and location assignment.
3066 prog
->UniformRemapTable
[loc
] = INACTIVE_UNIFORM_EXPLICIT_LOCATION
;
3069 /* Note, base location used for arrays. */
3070 map
->put(var
->data
.location
, var
->name
);
3076 reserve_subroutine_explicit_locations(struct gl_shader_program
*prog
,
3077 struct gl_shader
*sh
,
3080 unsigned slots
= var
->type
->uniform_locations();
3081 unsigned max_loc
= var
->data
.location
+ slots
- 1;
3083 /* Resize remap table if locations do not fit in the current one. */
3084 if (max_loc
+ 1 > sh
->NumSubroutineUniformRemapTable
) {
3085 sh
->SubroutineUniformRemapTable
=
3086 reralloc(sh
, sh
->SubroutineUniformRemapTable
,
3087 gl_uniform_storage
*,
3090 if (!sh
->SubroutineUniformRemapTable
) {
3091 linker_error(prog
, "Out of memory during linking.\n");
3095 /* Initialize allocated space. */
3096 for (unsigned i
= sh
->NumSubroutineUniformRemapTable
; i
< max_loc
+ 1; i
++)
3097 sh
->SubroutineUniformRemapTable
[i
] = NULL
;
3099 sh
->NumSubroutineUniformRemapTable
= max_loc
+ 1;
3102 for (unsigned i
= 0; i
< slots
; i
++) {
3103 unsigned loc
= var
->data
.location
+ i
;
3105 /* Check if location is already used. */
3106 if (sh
->SubroutineUniformRemapTable
[loc
] == INACTIVE_UNIFORM_EXPLICIT_LOCATION
) {
3108 /* ARB_explicit_uniform_location specification states:
3109 * "No two subroutine uniform variables can have the same location
3110 * in the same shader stage, otherwise a compiler or linker error
3111 * will be generated."
3114 "location qualifier for uniform %s overlaps "
3115 "previously used location\n",
3120 /* Initialize location as inactive before optimization
3121 * rounds and location assignment.
3123 sh
->SubroutineUniformRemapTable
[loc
] = INACTIVE_UNIFORM_EXPLICIT_LOCATION
;
3129 * Check and reserve all explicit uniform locations, called before
3130 * any optimizations happen to handle also inactive uniforms and
3131 * inactive array elements that may get trimmed away.
3134 check_explicit_uniform_locations(struct gl_context
*ctx
,
3135 struct gl_shader_program
*prog
)
3137 if (!ctx
->Extensions
.ARB_explicit_uniform_location
)
3140 /* This map is used to detect if overlapping explicit locations
3141 * occur with the same uniform (from different stage) or a different one.
3143 string_to_uint_map
*uniform_map
= new string_to_uint_map
;
3146 linker_error(prog
, "Out of memory during linking.\n");
3150 unsigned entries_total
= 0;
3151 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3152 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
3157 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3158 ir_variable
*var
= node
->as_variable();
3159 if (!var
|| var
->data
.mode
!= ir_var_uniform
)
3162 entries_total
+= var
->type
->uniform_locations();
3164 if (var
->data
.explicit_location
) {
3166 if (var
->type
->without_array()->is_subroutine())
3167 ret
= reserve_subroutine_explicit_locations(prog
, sh
, var
);
3169 ret
= reserve_explicit_locations(prog
, uniform_map
, var
);
3178 /* Verify that total amount of entries for explicit and implicit locations
3179 * is less than MAX_UNIFORM_LOCATIONS.
3181 if (entries_total
>= ctx
->Const
.MaxUserAssignableUniformLocations
) {
3182 linker_error(prog
, "count of uniform locations >= MAX_UNIFORM_LOCATIONS"
3183 "(%u >= %u)", entries_total
,
3184 ctx
->Const
.MaxUserAssignableUniformLocations
);
3190 should_add_buffer_variable(struct gl_shader_program
*shProg
,
3191 GLenum type
, const char *name
)
3193 bool found_interface
= false;
3194 unsigned block_name_len
= 0;
3195 const char *block_name_dot
= strchr(name
, '.');
3197 /* These rules only apply to buffer variables. So we return
3198 * true for the rest of types.
3200 if (type
!= GL_BUFFER_VARIABLE
)
3203 for (unsigned i
= 0; i
< shProg
->NumBufferInterfaceBlocks
; i
++) {
3204 const char *block_name
= shProg
->BufferInterfaceBlocks
[i
].Name
;
3205 block_name_len
= strlen(block_name
);
3207 const char *block_square_bracket
= strchr(block_name
, '[');
3208 if (block_square_bracket
) {
3209 /* The block is part of an array of named interfaces,
3210 * for the name comparison we ignore the "[x]" part.
3212 block_name_len
-= strlen(block_square_bracket
);
3215 if (block_name_dot
) {
3216 /* Check if the variable name starts with the interface
3217 * name. The interface name (if present) should have the
3218 * length than the interface block name we are comparing to.
3220 unsigned len
= strlen(name
) - strlen(block_name_dot
);
3221 if (len
!= block_name_len
)
3225 if (strncmp(block_name
, name
, block_name_len
) == 0) {
3226 found_interface
= true;
3231 /* We remove the interface name from the buffer variable name,
3232 * including the dot that follows it.
3234 if (found_interface
)
3235 name
= name
+ block_name_len
+ 1;
3237 /* From: ARB_program_interface_query extension:
3239 * "For an active shader storage block member declared as an array, an
3240 * entry will be generated only for the first array element, regardless
3241 * of its type. For arrays of aggregate types, the enumeration rules are
3242 * applied recursively for the single enumerated array element.
3244 const char *struct_first_dot
= strchr(name
, '.');
3245 const char *first_square_bracket
= strchr(name
, '[');
3247 /* The buffer variable is on top level and it is not an array */
3248 if (!first_square_bracket
) {
3250 /* The shader storage block member is a struct, then generate the entry */
3251 } else if (struct_first_dot
&& struct_first_dot
< first_square_bracket
) {
3254 /* Shader storage block member is an array, only generate an entry for the
3255 * first array element.
3257 if (strncmp(first_square_bracket
, "[0]", 3) == 0)
3265 add_program_resource(struct gl_shader_program
*prog
, GLenum type
,
3266 const void *data
, uint8_t stages
)
3270 /* If resource already exists, do not add it again. */
3271 for (unsigned i
= 0; i
< prog
->NumProgramResourceList
; i
++)
3272 if (prog
->ProgramResourceList
[i
].Data
== data
)
3275 prog
->ProgramResourceList
=
3277 prog
->ProgramResourceList
,
3278 gl_program_resource
,
3279 prog
->NumProgramResourceList
+ 1);
3281 if (!prog
->ProgramResourceList
) {
3282 linker_error(prog
, "Out of memory during linking.\n");
3286 struct gl_program_resource
*res
=
3287 &prog
->ProgramResourceList
[prog
->NumProgramResourceList
];
3291 res
->StageReferences
= stages
;
3293 prog
->NumProgramResourceList
++;
3298 /* Function checks if a variable var is a packed varying and
3299 * if given name is part of packed varying's list.
3301 * If a variable is a packed varying, it has a name like
3302 * 'packed:a,b,c' where a, b and c are separate variables.
3305 included_in_packed_varying(ir_variable
*var
, const char *name
)
3307 if (strncmp(var
->name
, "packed:", 7) != 0)
3310 char *list
= strdup(var
->name
+ 7);
3315 char *token
= strtok_r(list
, ",", &saveptr
);
3317 if (strcmp(token
, name
) == 0) {
3321 token
= strtok_r(NULL
, ",", &saveptr
);
3328 * Function builds a stage reference bitmask from variable name.
3331 build_stageref(struct gl_shader_program
*shProg
, const char *name
,
3336 /* Note, that we assume MAX 8 stages, if there will be more stages, type
3337 * used for reference mask in gl_program_resource will need to be changed.
3339 assert(MESA_SHADER_STAGES
< 8);
3341 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3342 struct gl_shader
*sh
= shProg
->_LinkedShaders
[i
];
3346 /* Shader symbol table may contain variables that have
3347 * been optimized away. Search IR for the variable instead.
3349 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3350 ir_variable
*var
= node
->as_variable();
3352 unsigned baselen
= strlen(var
->name
);
3354 if (included_in_packed_varying(var
, name
)) {
3359 /* Type needs to match if specified, otherwise we might
3360 * pick a variable with same name but different interface.
3362 if (var
->data
.mode
!= mode
)
3365 if (strncmp(var
->name
, name
, baselen
) == 0) {
3366 /* Check for exact name matches but also check for arrays and
3369 if (name
[baselen
] == '\0' ||
3370 name
[baselen
] == '[' ||
3371 name
[baselen
] == '.') {
3383 * Create gl_shader_variable from ir_variable class.
3385 static gl_shader_variable
*
3386 create_shader_variable(struct gl_shader_program
*shProg
, const ir_variable
*in
)
3388 gl_shader_variable
*out
= ralloc(shProg
, struct gl_shader_variable
);
3392 out
->type
= in
->type
;
3393 out
->name
= ralloc_strdup(shProg
, in
->name
);
3398 out
->location
= in
->data
.location
;
3399 out
->index
= in
->data
.index
;
3400 out
->patch
= in
->data
.patch
;
3401 out
->mode
= in
->data
.mode
;
3407 add_interface_variables(struct gl_shader_program
*shProg
,
3408 exec_list
*ir
, GLenum programInterface
)
3410 foreach_in_list(ir_instruction
, node
, ir
) {
3411 ir_variable
*var
= node
->as_variable();
3417 switch (var
->data
.mode
) {
3418 /* From GL 4.3 core spec, section 11.1.1 (Vertex Attributes):
3419 * "For GetActiveAttrib, all active vertex shader input variables
3420 * are enumerated, including the special built-in inputs gl_VertexID
3421 * and gl_InstanceID."
3423 case ir_var_system_value
:
3424 if (var
->data
.location
!= SYSTEM_VALUE_VERTEX_ID
&&
3425 var
->data
.location
!= SYSTEM_VALUE_VERTEX_ID_ZERO_BASE
&&
3426 var
->data
.location
!= SYSTEM_VALUE_INSTANCE_ID
)
3428 /* Mark special built-in inputs referenced by the vertex stage so
3429 * that they are considered active by the shader queries.
3431 mask
= (1 << (MESA_SHADER_VERTEX
));
3433 case ir_var_shader_in
:
3434 if (programInterface
!= GL_PROGRAM_INPUT
)
3437 case ir_var_shader_out
:
3438 if (programInterface
!= GL_PROGRAM_OUTPUT
)
3445 /* Skip packed varyings, packed varyings are handled separately
3446 * by add_packed_varyings.
3448 if (strncmp(var
->name
, "packed:", 7) == 0)
3451 /* Skip fragdata arrays, these are handled separately
3452 * by add_fragdata_arrays.
3454 if (strncmp(var
->name
, "gl_out_FragData", 15) == 0)
3457 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3461 if (!add_program_resource(shProg
, programInterface
, sha_v
,
3462 build_stageref(shProg
, sha_v
->name
,
3463 sha_v
->mode
) | mask
))
3470 add_packed_varyings(struct gl_shader_program
*shProg
, int stage
, GLenum type
)
3472 struct gl_shader
*sh
= shProg
->_LinkedShaders
[stage
];
3475 if (!sh
|| !sh
->packed_varyings
)
3478 foreach_in_list(ir_instruction
, node
, sh
->packed_varyings
) {
3479 ir_variable
*var
= node
->as_variable();
3481 switch (var
->data
.mode
) {
3482 case ir_var_shader_in
:
3483 iface
= GL_PROGRAM_INPUT
;
3485 case ir_var_shader_out
:
3486 iface
= GL_PROGRAM_OUTPUT
;
3489 unreachable("unexpected type");
3492 if (type
== iface
) {
3493 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3496 if (!add_program_resource(shProg
, iface
, sha_v
,
3497 build_stageref(shProg
, sha_v
->name
,
3507 add_fragdata_arrays(struct gl_shader_program
*shProg
)
3509 struct gl_shader
*sh
= shProg
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
3511 if (!sh
|| !sh
->fragdata_arrays
)
3514 foreach_in_list(ir_instruction
, node
, sh
->fragdata_arrays
) {
3515 ir_variable
*var
= node
->as_variable();
3517 assert(var
->data
.mode
== ir_var_shader_out
);
3518 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3521 if (!add_program_resource(shProg
, GL_PROGRAM_OUTPUT
, sha_v
,
3522 1 << MESA_SHADER_FRAGMENT
))
/**
 * Return a heap-allocated copy of the top-level member name: everything in
 * \p name up to (but not including) the first '.' or '[' — whichever comes
 * first — or the whole string when neither is present.  Caller frees.
 */
static char*
get_top_level_name(const char *name)
{
   const char *dot = strchr(name, '.');
   const char *bracket = strchr(name, '[');
   size_t top_len;

   /* From ARB_program_interface_query spec:
    *
    * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying
    * the number of active array elements of the top-level shader storage
    * block member containing to the active variable is written to <params>.
    * If the top-level block member is not declared as an array, the value
    * one is written to <params>. If the top-level block member is an array
    * with no declared size, the value zero is written to <params>."
    */

   if (!bracket && !dot) {
      /* The buffer variable is on top level. */
      top_len = strlen(name);
   } else if (!bracket || (dot && dot < bracket)) {
      top_len = (size_t)(dot - name);
   } else {
      top_len = (size_t)(bracket - name);
   }

   /* Equivalent to strndup(name, top_len), without relying on the POSIX
    * extension being declared.
    */
   char *top = (char *) malloc(top_len + 1);
   if (!top)
      return NULL;
   memcpy(top, name, top_len);
   top[top_len] = '\0';
   return top;
}
/**
 * Return a heap-allocated copy of \p name with the leading component and
 * its '.' stripped; when there is no '.', a copy of the whole string.
 * Caller frees.
 */
static char*
get_var_name(const char *name)
{
   const char *dot = strchr(name, '.');
   const char *start = dot ? dot + 1 : name;

   /* Equivalent to strdup(start): the original's
    * strndup(first_dot + 1, strlen(first_dot) - 1) copies exactly the
    * remainder after the dot.
    */
   char *copy = (char *) malloc(strlen(start) + 1);
   if (!copy)
      return NULL;
   strcpy(copy, start);
   return copy;
}
/**
 * Report whether \p name already denotes a top-level shader storage block
 * member.  Two declarations are possible: an instanced block, where the
 * top-level form is "<interface_name>.<field_name>", or an unnamed block,
 * where it is just "<field_name>".  Returns false (after logging to stderr)
 * if the scratch buffer cannot be allocated.
 */
static bool
is_top_level_shader_storage_block_member(const char* name,
                                         const char* interface_name,
                                         const char* field_name)
{
   bool result = false;

   /* Build the name the variable would have if it were a top-level member
    * of an instanced block: interface name + '.' + field name + NUL.
    */
   int name_length = strlen(interface_name) + 1 + strlen(field_name) + 1;
   char *full_instanced_name = (char *) calloc(name_length, sizeof(char));
   if (!full_instanced_name) {
      fprintf(stderr, "%s: Cannot allocate space for name\n", __func__);
      return false;
   }

   snprintf(full_instanced_name, name_length, "%s.%s",
            interface_name, field_name);

   /* Check if its top-level shader storage block member of an
    * instanced interface block, or of a unnamed interface block.
    */
   if (strcmp(name, full_instanced_name) == 0 ||
       strcmp(name, field_name) == 0)
      result = true;

   free(full_instanced_name);
   return result;
}
3609 get_array_size(struct gl_uniform_storage
*uni
, const glsl_struct_field
*field
,
3610 char *interface_name
, char *var_name
)
3612 /* From GL_ARB_program_interface_query spec:
3614 * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer
3615 * identifying the number of active array elements of the top-level
3616 * shader storage block member containing to the active variable is
3617 * written to <params>. If the top-level block member is not
3618 * declared as an array, the value one is written to <params>. If
3619 * the top-level block member is an array with no declared size,
3620 * the value zero is written to <params>.
3622 if (is_top_level_shader_storage_block_member(uni
->name
,
3626 else if (field
->type
->is_unsized_array())
3628 else if (field
->type
->is_array())
3629 return field
->type
->length
;
3635 get_array_stride(struct gl_uniform_storage
*uni
, const glsl_type
*interface
,
3636 const glsl_struct_field
*field
, char *interface_name
,
3639 /* From GL_ARB_program_interface_query:
3641 * "For the property TOP_LEVEL_ARRAY_STRIDE, a single integer
3642 * identifying the stride between array elements of the top-level
3643 * shader storage block member containing the active variable is
3644 * written to <params>. For top-level block members declared as
3645 * arrays, the value written is the difference, in basic machine
3646 * units, between the offsets of the active variable for
3647 * consecutive elements in the top-level array. For top-level
3648 * block members not declared as an array, zero is written to
3651 if (field
->type
->is_array()) {
3652 const enum glsl_matrix_layout matrix_layout
=
3653 glsl_matrix_layout(field
->matrix_layout
);
3654 bool row_major
= matrix_layout
== GLSL_MATRIX_LAYOUT_ROW_MAJOR
;
3655 const glsl_type
*array_type
= field
->type
->fields
.array
;
3657 if (is_top_level_shader_storage_block_member(uni
->name
,
3662 if (interface
->interface_packing
!= GLSL_INTERFACE_PACKING_STD430
) {
3663 if (array_type
->is_record() || array_type
->is_array())
3664 return glsl_align(array_type
->std140_size(row_major
), 16);
3666 return MAX2(array_type
->std140_base_alignment(row_major
), 16);
3668 return array_type
->std430_array_stride(row_major
);
3675 calculate_array_size_and_stride(struct gl_shader_program
*shProg
,
3676 struct gl_uniform_storage
*uni
)
3678 int block_index
= uni
->block_index
;
3679 int array_size
= -1;
3680 int array_stride
= -1;
3681 char *var_name
= get_top_level_name(uni
->name
);
3682 char *interface_name
=
3683 get_top_level_name(shProg
->BufferInterfaceBlocks
[block_index
].Name
);
3685 if (strcmp(var_name
, interface_name
) == 0) {
3686 /* Deal with instanced array of SSBOs */
3687 char *temp_name
= get_var_name(uni
->name
);
3689 linker_error(shProg
, "Out of memory during linking.\n");
3690 goto write_top_level_array_size_and_stride
;
3693 var_name
= get_top_level_name(temp_name
);
3696 linker_error(shProg
, "Out of memory during linking.\n");
3697 goto write_top_level_array_size_and_stride
;
3701 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
3702 if (shProg
->Shaders
[i
] == NULL
)
3705 const gl_shader
*stage
= shProg
->Shaders
[i
];
3706 foreach_in_list(ir_instruction
, node
, stage
->ir
) {
3707 ir_variable
*var
= node
->as_variable();
3708 if (!var
|| !var
->get_interface_type() ||
3709 var
->data
.mode
!= ir_var_shader_storage
)
3712 const glsl_type
*interface
= var
->get_interface_type();
3714 if (strcmp(interface_name
, interface
->name
) != 0)
3717 for (unsigned i
= 0; i
< interface
->length
; i
++) {
3718 const glsl_struct_field
*field
= &interface
->fields
.structure
[i
];
3719 if (strcmp(field
->name
, var_name
) != 0)
3722 array_stride
= get_array_stride(uni
, interface
, field
,
3723 interface_name
, var_name
);
3724 array_size
= get_array_size(uni
, field
, interface_name
, var_name
);
3725 goto write_top_level_array_size_and_stride
;
3729 write_top_level_array_size_and_stride
:
3730 free(interface_name
);
3732 uni
->top_level_array_stride
= array_stride
;
3733 uni
->top_level_array_size
= array_size
;
3737 * Builds up a list of program resources that point to existing
3741 build_program_resource_list(struct gl_shader_program
*shProg
)
3743 /* Rebuild resource list. */
3744 if (shProg
->ProgramResourceList
) {
3745 ralloc_free(shProg
->ProgramResourceList
);
3746 shProg
->ProgramResourceList
= NULL
;
3747 shProg
->NumProgramResourceList
= 0;
3750 int input_stage
= MESA_SHADER_STAGES
, output_stage
= 0;
3752 /* Determine first input and final output stage. These are used to
3753 * detect which variables should be enumerated in the resource list
3754 * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
3756 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3757 if (!shProg
->_LinkedShaders
[i
])
3759 if (input_stage
== MESA_SHADER_STAGES
)
3764 /* Empty shader, no resources. */
3765 if (input_stage
== MESA_SHADER_STAGES
&& output_stage
== 0)
3768 /* Program interface needs to expose varyings in case of SSO. */
3769 if (shProg
->SeparateShader
) {
3770 if (!add_packed_varyings(shProg
, input_stage
, GL_PROGRAM_INPUT
))
3773 if (!add_packed_varyings(shProg
, output_stage
, GL_PROGRAM_OUTPUT
))
3777 if (!add_fragdata_arrays(shProg
))
3780 /* Add inputs and outputs to the resource list. */
3781 if (!add_interface_variables(shProg
, shProg
->_LinkedShaders
[input_stage
]->ir
,
3785 if (!add_interface_variables(shProg
, shProg
->_LinkedShaders
[output_stage
]->ir
,
3789 /* Add transform feedback varyings. */
3790 if (shProg
->LinkedTransformFeedback
.NumVarying
> 0) {
3791 for (int i
= 0; i
< shProg
->LinkedTransformFeedback
.NumVarying
; i
++) {
3792 if (!add_program_resource(shProg
, GL_TRANSFORM_FEEDBACK_VARYING
,
3793 &shProg
->LinkedTransformFeedback
.Varyings
[i
],
3799 /* Add uniforms from uniform storage. */
3800 for (unsigned i
= 0; i
< shProg
->NumUniformStorage
; i
++) {
3801 /* Do not add uniforms internally used by Mesa. */
3802 if (shProg
->UniformStorage
[i
].hidden
)
3806 build_stageref(shProg
, shProg
->UniformStorage
[i
].name
,
3809 /* Add stagereferences for uniforms in a uniform block. */
3810 int block_index
= shProg
->UniformStorage
[i
].block_index
;
3811 if (block_index
!= -1) {
3812 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
3813 if (shProg
->InterfaceBlockStageIndex
[j
][block_index
] != -1)
3814 stageref
|= (1 << j
);
3818 bool is_shader_storage
= shProg
->UniformStorage
[i
].is_shader_storage
;
3819 GLenum type
= is_shader_storage
? GL_BUFFER_VARIABLE
: GL_UNIFORM
;
3820 if (!should_add_buffer_variable(shProg
, type
,
3821 shProg
->UniformStorage
[i
].name
))
3824 if (is_shader_storage
) {
3825 calculate_array_size_and_stride(shProg
, &shProg
->UniformStorage
[i
]);
3828 if (!add_program_resource(shProg
, type
,
3829 &shProg
->UniformStorage
[i
], stageref
))
3833 /* Add program uniform blocks and shader storage blocks. */
3834 for (unsigned i
= 0; i
< shProg
->NumBufferInterfaceBlocks
; i
++) {
3835 bool is_shader_storage
= shProg
->BufferInterfaceBlocks
[i
].IsShaderStorage
;
3836 GLenum type
= is_shader_storage
? GL_SHADER_STORAGE_BLOCK
: GL_UNIFORM_BLOCK
;
3837 if (!add_program_resource(shProg
, type
,
3838 &shProg
->BufferInterfaceBlocks
[i
], 0))
3842 /* Add atomic counter buffers. */
3843 for (unsigned i
= 0; i
< shProg
->NumAtomicBuffers
; i
++) {
3844 if (!add_program_resource(shProg
, GL_ATOMIC_COUNTER_BUFFER
,
3845 &shProg
->AtomicBuffers
[i
], 0))
3849 for (unsigned i
= 0; i
< shProg
->NumUniformStorage
; i
++) {
3851 if (!shProg
->UniformStorage
[i
].hidden
)
3854 for (int j
= MESA_SHADER_VERTEX
; j
< MESA_SHADER_STAGES
; j
++) {
3855 if (!shProg
->UniformStorage
[i
].opaque
[j
].active
||
3856 !shProg
->UniformStorage
[i
].type
->is_subroutine())
3859 type
= _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage
)j
);
3860 /* add shader subroutines */
3861 if (!add_program_resource(shProg
, type
, &shProg
->UniformStorage
[i
], 0))
3866 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3867 struct gl_shader
*sh
= shProg
->_LinkedShaders
[i
];
3873 type
= _mesa_shader_stage_to_subroutine((gl_shader_stage
)i
);
3874 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3875 if (!add_program_resource(shProg
, type
, &sh
->SubroutineFunctions
[j
], 0))
3882 * This check is done to make sure we allow only constant expression
3883 * indexing and "constant-index-expression" (indexing with an expression
3884 * that includes loop induction variable).
3887 validate_sampler_array_indexing(struct gl_context
*ctx
,
3888 struct gl_shader_program
*prog
)
3890 dynamic_sampler_array_indexing_visitor v
;
3891 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3892 if (prog
->_LinkedShaders
[i
] == NULL
)
3895 bool no_dynamic_indexing
=
3896 ctx
->Const
.ShaderCompilerOptions
[i
].EmitNoIndirectSampler
;
3898 /* Search for array derefs in shader. */
3899 v
.run(prog
->_LinkedShaders
[i
]->ir
);
3900 if (v
.uses_dynamic_sampler_array_indexing()) {
3901 const char *msg
= "sampler arrays indexed with non-constant "
3902 "expressions is forbidden in GLSL %s %u";
3903 /* Backend has indicated that it has no dynamic indexing support. */
3904 if (no_dynamic_indexing
) {
3905 linker_error(prog
, msg
, prog
->IsES
? "ES" : "", prog
->Version
);
3908 linker_warning(prog
, msg
, prog
->IsES
? "ES" : "", prog
->Version
);
3916 link_assign_subroutine_types(struct gl_shader_program
*prog
)
3918 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3919 gl_shader
*sh
= prog
->_LinkedShaders
[i
];
3924 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3925 ir_function
*fn
= node
->as_function();
3929 if (fn
->is_subroutine
)
3930 sh
->NumSubroutineUniformTypes
++;
3932 if (!fn
->num_subroutine_types
)
3935 sh
->SubroutineFunctions
= reralloc(sh
, sh
->SubroutineFunctions
,
3936 struct gl_subroutine_function
,
3937 sh
->NumSubroutineFunctions
+ 1);
3938 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].name
= ralloc_strdup(sh
, fn
->name
);
3939 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].num_compat_types
= fn
->num_subroutine_types
;
3940 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].types
=
3941 ralloc_array(sh
, const struct glsl_type
*,
3942 fn
->num_subroutine_types
);
3944 /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
3947 * "Each subroutine with an index qualifier in the shader must be
3948 * given a unique index, otherwise a compile or link error will be
3951 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3952 if (sh
->SubroutineFunctions
[j
].index
!= -1 &&
3953 sh
->SubroutineFunctions
[j
].index
== fn
->subroutine_index
) {
3954 linker_error(prog
, "each subroutine index qualifier in the "
3955 "shader must be unique\n");
3959 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].index
=
3960 fn
->subroutine_index
;
3962 for (int j
= 0; j
< fn
->num_subroutine_types
; j
++)
3963 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].types
[j
] = fn
->subroutine_types
[j
];
3964 sh
->NumSubroutineFunctions
++;
3967 /* Assign index for subroutines without an explicit index*/
3969 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3970 while (sh
->SubroutineFunctions
[j
].index
== -1) {
3971 for (unsigned k
= 0; k
< sh
->NumSubroutineFunctions
; k
++) {
3972 if (sh
->SubroutineFunctions
[k
].index
== index
)
3974 else if (k
== sh
->NumSubroutineFunctions
- 1)
3975 sh
->SubroutineFunctions
[j
].index
= index
;
3984 split_ubos_and_ssbos(void *mem_ctx
,
3985 struct gl_uniform_block
*blocks
,
3986 unsigned num_blocks
,
3987 struct gl_uniform_block
***ubos
,
3989 unsigned **ubo_interface_block_indices
,
3990 struct gl_uniform_block
***ssbos
,
3991 unsigned *num_ssbos
,
3992 unsigned **ssbo_interface_block_indices
)
3994 unsigned num_ubo_blocks
= 0;
3995 unsigned num_ssbo_blocks
= 0;
3997 for (unsigned i
= 0; i
< num_blocks
; i
++) {
3998 if (blocks
[i
].IsShaderStorage
)
4004 *ubos
= ralloc_array(mem_ctx
, gl_uniform_block
*, num_ubo_blocks
);
4007 *ssbos
= ralloc_array(mem_ctx
, gl_uniform_block
*, num_ssbo_blocks
);
4010 if (ubo_interface_block_indices
)
4011 *ubo_interface_block_indices
=
4012 ralloc_array(mem_ctx
, unsigned, num_ubo_blocks
);
4014 if (ssbo_interface_block_indices
)
4015 *ssbo_interface_block_indices
=
4016 ralloc_array(mem_ctx
, unsigned, num_ssbo_blocks
);
4018 for (unsigned i
= 0; i
< num_blocks
; i
++) {
4019 if (blocks
[i
].IsShaderStorage
) {
4020 (*ssbos
)[*num_ssbos
] = &blocks
[i
];
4021 if (ssbo_interface_block_indices
)
4022 (*ssbo_interface_block_indices
)[*num_ssbos
] = i
;
4025 (*ubos
)[*num_ubos
] = &blocks
[i
];
4026 if (ubo_interface_block_indices
)
4027 (*ubo_interface_block_indices
)[*num_ubos
] = i
;
4032 assert(*num_ubos
+ *num_ssbos
== num_blocks
);
4036 set_always_active_io(exec_list
*ir
, ir_variable_mode io_mode
)
4038 assert(io_mode
== ir_var_shader_in
|| io_mode
== ir_var_shader_out
);
4040 foreach_in_list(ir_instruction
, node
, ir
) {
4041 ir_variable
*const var
= node
->as_variable();
4043 if (var
== NULL
|| var
->data
.mode
!= io_mode
)
4046 /* Don't set always active on builtins that haven't been redeclared */
4047 if (var
->data
.how_declared
== ir_var_declared_implicitly
)
4050 var
->data
.always_active_io
= true;
4055 * When separate shader programs are enabled, only input/outputs between
4056 * the stages of a multi-stage separate program can be safely removed
4057 * from the shader interface. Other inputs/outputs must remain active.
4060 disable_varying_optimizations_for_sso(struct gl_shader_program
*prog
)
4062 unsigned first
, last
;
4063 assert(prog
->SeparateShader
);
4065 first
= MESA_SHADER_STAGES
;
4068 /* Determine first and last stage. Excluding the compute stage */
4069 for (unsigned i
= 0; i
< MESA_SHADER_COMPUTE
; i
++) {
4070 if (!prog
->_LinkedShaders
[i
])
4072 if (first
== MESA_SHADER_STAGES
)
4077 if (first
== MESA_SHADER_STAGES
)
4080 for (unsigned stage
= 0; stage
< MESA_SHADER_STAGES
; stage
++) {
4081 gl_shader
*sh
= prog
->_LinkedShaders
[stage
];
4085 if (first
== last
) {
4086 /* For a single shader program only allow inputs to the vertex shader
4087 * and outputs from the fragment shader to be removed.
4089 if (stage
!= MESA_SHADER_VERTEX
)
4090 set_always_active_io(sh
->ir
, ir_var_shader_in
);
4091 if (stage
!= MESA_SHADER_FRAGMENT
)
4092 set_always_active_io(sh
->ir
, ir_var_shader_out
);
4094 /* For multi-stage separate shader programs only allow inputs and
4095 * outputs between the shader stages to be removed as well as inputs
4096 * to the vertex shader and outputs from the fragment shader.
4098 if (stage
== first
&& stage
!= MESA_SHADER_VERTEX
)
4099 set_always_active_io(sh
->ir
, ir_var_shader_in
);
4100 else if (stage
== last
&& stage
!= MESA_SHADER_FRAGMENT
)
4101 set_always_active_io(sh
->ir
, ir_var_shader_out
);
4107 link_shaders(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
4109 tfeedback_decl
*tfeedback_decls
= NULL
;
4110 unsigned num_tfeedback_decls
= prog
->TransformFeedback
.NumVarying
;
4112 void *mem_ctx
= ralloc_context(NULL
); // temporary linker context
4114 prog
->LinkStatus
= true; /* All error paths will set this to false */
4115 prog
->Validated
= false;
4116 prog
->_Used
= false;
4118 prog
->ARB_fragment_coord_conventions_enable
= false;
4120 /* Separate the shaders into groups based on their type.
4122 struct gl_shader
**shader_list
[MESA_SHADER_STAGES
];
4123 unsigned num_shaders
[MESA_SHADER_STAGES
];
4125 for (int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4126 shader_list
[i
] = (struct gl_shader
**)
4127 calloc(prog
->NumShaders
, sizeof(struct gl_shader
*));
4131 unsigned min_version
= UINT_MAX
;
4132 unsigned max_version
= 0;
4133 const bool is_es_prog
=
4134 (prog
->NumShaders
> 0 && prog
->Shaders
[0]->IsES
) ? true : false;
4135 for (unsigned i
= 0; i
< prog
->NumShaders
; i
++) {
4136 min_version
= MIN2(min_version
, prog
->Shaders
[i
]->Version
);
4137 max_version
= MAX2(max_version
, prog
->Shaders
[i
]->Version
);
4139 if (prog
->Shaders
[i
]->IsES
!= is_es_prog
) {
4140 linker_error(prog
, "all shaders must use same shading "
4141 "language version\n");
4145 if (prog
->Shaders
[i
]->ARB_fragment_coord_conventions_enable
) {
4146 prog
->ARB_fragment_coord_conventions_enable
= true;
4149 gl_shader_stage shader_type
= prog
->Shaders
[i
]->Stage
;
4150 shader_list
[shader_type
][num_shaders
[shader_type
]] = prog
->Shaders
[i
];
4151 num_shaders
[shader_type
]++;
4154 /* In desktop GLSL, different shader versions may be linked together. In
4155 * GLSL ES, all shader versions must be the same.
4157 if (is_es_prog
&& min_version
!= max_version
) {
4158 linker_error(prog
, "all shaders must use same shading "
4159 "language version\n");
4163 prog
->Version
= max_version
;
4164 prog
->IsES
= is_es_prog
;
4166 /* From OpenGL 4.5 Core specification (7.3 Program Objects):
4167 * "Linking can fail for a variety of reasons as specified in the OpenGL
4168 * Shading Language Specification, as well as any of the following
4171 * * No shader objects are attached to program.
4175 * Same rule applies for OpenGL ES >= 3.1.
4178 if (prog
->NumShaders
== 0 &&
4179 ((ctx
->API
== API_OPENGL_CORE
&& ctx
->Version
>= 45) ||
4180 (ctx
->API
== API_OPENGLES2
&& ctx
->Version
>= 31))) {
4181 linker_error(prog
, "No shader objects are attached to program.\n");
4185 /* Some shaders have to be linked with some other shaders present.
4187 if (num_shaders
[MESA_SHADER_GEOMETRY
] > 0 &&
4188 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4189 !prog
->SeparateShader
) {
4190 linker_error(prog
, "Geometry shader must be linked with "
4194 if (num_shaders
[MESA_SHADER_TESS_EVAL
] > 0 &&
4195 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4196 !prog
->SeparateShader
) {
4197 linker_error(prog
, "Tessellation evaluation shader must be linked with "
4201 if (num_shaders
[MESA_SHADER_TESS_CTRL
] > 0 &&
4202 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4203 !prog
->SeparateShader
) {
4204 linker_error(prog
, "Tessellation control shader must be linked with "
4209 /* The spec is self-contradictory here. It allows linking without a tess
4210 * eval shader, but that can only be used with transform feedback and
4211 * rasterization disabled. However, transform feedback isn't allowed
4212 * with GL_PATCHES, so it can't be used.
4214 * More investigation showed that the idea of transform feedback after
4215 * a tess control shader was dropped, because some hw vendors couldn't
4216 * support tessellation without a tess eval shader, but the linker section
4217 * wasn't updated to reflect that.
4219 * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
4222 * Do what's reasonable and always require a tess eval shader if a tess
4223 * control shader is present.
4225 if (num_shaders
[MESA_SHADER_TESS_CTRL
] > 0 &&
4226 num_shaders
[MESA_SHADER_TESS_EVAL
] == 0 &&
4227 !prog
->SeparateShader
) {
4228 linker_error(prog
, "Tessellation control shader must be linked with "
4229 "tessellation evaluation shader\n");
4233 /* Compute shaders have additional restrictions. */
4234 if (num_shaders
[MESA_SHADER_COMPUTE
] > 0 &&
4235 num_shaders
[MESA_SHADER_COMPUTE
] != prog
->NumShaders
) {
4236 linker_error(prog
, "Compute shaders may not be linked with any other "
4237 "type of shader\n");
4240 for (unsigned int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4241 if (prog
->_LinkedShaders
[i
] != NULL
)
4242 _mesa_delete_shader(ctx
, prog
->_LinkedShaders
[i
]);
4244 prog
->_LinkedShaders
[i
] = NULL
;
4247 /* Link all shaders for a particular stage and validate the result.
4249 for (int stage
= 0; stage
< MESA_SHADER_STAGES
; stage
++) {
4250 if (num_shaders
[stage
] > 0) {
4251 gl_shader
*const sh
=
4252 link_intrastage_shaders(mem_ctx
, ctx
, prog
, shader_list
[stage
],
4253 num_shaders
[stage
]);
4255 if (!prog
->LinkStatus
) {
4257 _mesa_delete_shader(ctx
, sh
);
4262 case MESA_SHADER_VERTEX
:
4263 validate_vertex_shader_executable(prog
, sh
);
4265 case MESA_SHADER_TESS_CTRL
:
4266 /* nothing to be done */
4268 case MESA_SHADER_TESS_EVAL
:
4269 validate_tess_eval_shader_executable(prog
, sh
);
4271 case MESA_SHADER_GEOMETRY
:
4272 validate_geometry_shader_executable(prog
, sh
);
4274 case MESA_SHADER_FRAGMENT
:
4275 validate_fragment_shader_executable(prog
, sh
);
4278 if (!prog
->LinkStatus
) {
4280 _mesa_delete_shader(ctx
, sh
);
4284 _mesa_reference_shader(ctx
, &prog
->_LinkedShaders
[stage
], sh
);
4288 if (num_shaders
[MESA_SHADER_GEOMETRY
] > 0)
4289 prog
->LastClipDistanceArraySize
= prog
->Geom
.ClipDistanceArraySize
;
4290 else if (num_shaders
[MESA_SHADER_TESS_EVAL
] > 0)
4291 prog
->LastClipDistanceArraySize
= prog
->TessEval
.ClipDistanceArraySize
;
4292 else if (num_shaders
[MESA_SHADER_VERTEX
] > 0)
4293 prog
->LastClipDistanceArraySize
= prog
->Vert
.ClipDistanceArraySize
;
4295 prog
->LastClipDistanceArraySize
= 0; /* Not used */
4297 /* Here begins the inter-stage linking phase. Some initial validation is
4298 * performed, then locations are assigned for uniforms, attributes, and
4301 cross_validate_uniforms(prog
);
4302 if (!prog
->LinkStatus
)
4305 unsigned first
, last
, prev
;
4307 first
= MESA_SHADER_STAGES
;
4310 /* Determine first and last stage. */
4311 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4312 if (!prog
->_LinkedShaders
[i
])
4314 if (first
== MESA_SHADER_STAGES
)
4319 check_explicit_uniform_locations(ctx
, prog
);
4320 link_assign_subroutine_types(prog
);
4322 if (!prog
->LinkStatus
)
4325 resize_tes_inputs(ctx
, prog
);
4327 /* Validate the inputs of each stage with the output of the preceding
4331 for (unsigned i
= prev
+ 1; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4332 if (prog
->_LinkedShaders
[i
] == NULL
)
4335 validate_interstage_inout_blocks(prog
, prog
->_LinkedShaders
[prev
],
4336 prog
->_LinkedShaders
[i
]);
4337 if (!prog
->LinkStatus
)
4340 cross_validate_outputs_to_inputs(prog
,
4341 prog
->_LinkedShaders
[prev
],
4342 prog
->_LinkedShaders
[i
]);
4343 if (!prog
->LinkStatus
)
4349 /* Cross-validate uniform blocks between shader stages */
4350 validate_interstage_uniform_blocks(prog
, prog
->_LinkedShaders
,
4351 MESA_SHADER_STAGES
);
4352 if (!prog
->LinkStatus
)
4355 for (unsigned int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4356 if (prog
->_LinkedShaders
[i
] != NULL
)
4357 lower_named_interface_blocks(mem_ctx
, prog
->_LinkedShaders
[i
]);
4360 /* Implement the GLSL 1.30+ rule for discard vs infinite loops Do
4361 * it before optimization because we want most of the checks to get
4362 * dropped thanks to constant propagation.
4364 * This rule also applies to GLSL ES 3.00.
4366 if (max_version
>= (is_es_prog
? 300 : 130)) {
4367 struct gl_shader
*sh
= prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
4369 lower_discard_flow(sh
->ir
);
4373 if (prog
->SeparateShader
)
4374 disable_varying_optimizations_for_sso(prog
);
4376 if (!interstage_cross_validate_uniform_blocks(prog
))
4379 /* Do common optimization before assigning storage for attributes,
4380 * uniforms, and varyings. Later optimization could possibly make
4381 * some of that unused.
4383 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4384 if (prog
->_LinkedShaders
[i
] == NULL
)
4387 detect_recursion_linked(prog
, prog
->_LinkedShaders
[i
]->ir
);
4388 if (!prog
->LinkStatus
)
4391 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerClipDistance
) {
4392 lower_clip_distance(prog
->_LinkedShaders
[i
]);
4395 if (ctx
->Const
.LowerTessLevel
) {
4396 lower_tess_level(prog
->_LinkedShaders
[i
]);
4399 while (do_common_optimization(prog
->_LinkedShaders
[i
]->ir
, true, false,
4400 &ctx
->Const
.ShaderCompilerOptions
[i
],
4401 ctx
->Const
.NativeIntegers
))
4404 lower_const_arrays_to_uniforms(prog
->_LinkedShaders
[i
]->ir
);
4407 /* Validation for special cases where we allow sampler array indexing
4408 * with loop induction variable. This check emits a warning or error
4409 * depending if backend can handle dynamic indexing.
4411 if ((!prog
->IsES
&& prog
->Version
< 130) ||
4412 (prog
->IsES
&& prog
->Version
< 300)) {
4413 if (!validate_sampler_array_indexing(ctx
, prog
))
4417 /* Check and validate stream emissions in geometry shaders */
4418 validate_geometry_shader_emissions(ctx
, prog
);
4420 /* Mark all generic shader inputs and outputs as unpaired. */
4421 for (unsigned i
= MESA_SHADER_VERTEX
; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4422 if (prog
->_LinkedShaders
[i
] != NULL
) {
4423 link_invalidate_variable_locations(prog
->_LinkedShaders
[i
]->ir
);
4428 for (unsigned i
= prev
+ 1; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4429 if (prog
->_LinkedShaders
[i
] == NULL
)
4432 match_explicit_outputs_to_inputs(prog
, prog
->_LinkedShaders
[prev
],
4433 prog
->_LinkedShaders
[i
]);
4437 if (!assign_attribute_or_color_locations(prog
, &ctx
->Const
,
4438 MESA_SHADER_VERTEX
)) {
4442 if (!assign_attribute_or_color_locations(prog
, &ctx
->Const
,
4443 MESA_SHADER_FRAGMENT
)) {
4447 if (num_tfeedback_decls
!= 0) {
4448 /* From GL_EXT_transform_feedback:
4449 * A program will fail to link if:
4451 * * the <count> specified by TransformFeedbackVaryingsEXT is
4452 * non-zero, but the program object has no vertex or geometry
4455 if (first
== MESA_SHADER_FRAGMENT
) {
4456 linker_error(prog
, "Transform feedback varyings specified, but "
4457 "no vertex or geometry shader is present.\n");
4461 tfeedback_decls
= ralloc_array(mem_ctx
, tfeedback_decl
,
4462 prog
->TransformFeedback
.NumVarying
);
4463 if (!parse_tfeedback_decls(ctx
, prog
, mem_ctx
, num_tfeedback_decls
,
4464 prog
->TransformFeedback
.VaryingNames
,
4469 /* Linking the stages in the opposite order (from fragment to vertex)
4470 * ensures that inter-shader outputs written to in an earlier stage are
4471 * eliminated if they are (transitively) not used in a later stage.
4475 if (first
< MESA_SHADER_FRAGMENT
) {
4476 gl_shader
*const sh
= prog
->_LinkedShaders
[last
];
4478 if (first
!= MESA_SHADER_VERTEX
) {
4479 /* There was no vertex shader, but we still have to assign varying
4480 * locations for use by tessellation/geometry shader inputs in SSO.
4482 * If the shader is not separable (i.e., prog->SeparateShader is
4483 * false), linking will have already failed when first is not
4484 * MESA_SHADER_VERTEX.
4486 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4487 NULL
, prog
->_LinkedShaders
[first
],
4488 num_tfeedback_decls
, tfeedback_decls
))
4492 if (last
!= MESA_SHADER_FRAGMENT
&&
4493 (num_tfeedback_decls
!= 0 || prog
->SeparateShader
)) {
4494 /* There was no fragment shader, but we still have to assign varying
4495 * locations for use by transform feedback.
4497 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4499 num_tfeedback_decls
, tfeedback_decls
))
4503 do_dead_builtin_varyings(ctx
, sh
, NULL
,
4504 num_tfeedback_decls
, tfeedback_decls
);
4506 remove_unused_shader_inputs_and_outputs(prog
->SeparateShader
, sh
,
4509 else if (first
== MESA_SHADER_FRAGMENT
) {
4510 /* If the program only contains a fragment shader...
4512 gl_shader
*const sh
= prog
->_LinkedShaders
[first
];
4514 do_dead_builtin_varyings(ctx
, NULL
, sh
,
4515 num_tfeedback_decls
, tfeedback_decls
);
4517 if (prog
->SeparateShader
) {
4518 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4519 NULL
/* producer */,
4521 0 /* num_tfeedback_decls */,
4522 NULL
/* tfeedback_decls */))
4525 remove_unused_shader_inputs_and_outputs(false, sh
,
4531 for (int i
= next
- 1; i
>= 0; i
--) {
4532 if (prog
->_LinkedShaders
[i
] == NULL
)
4535 gl_shader
*const sh_i
= prog
->_LinkedShaders
[i
];
4536 gl_shader
*const sh_next
= prog
->_LinkedShaders
[next
];
4538 if (!assign_varying_locations(ctx
, mem_ctx
, prog
, sh_i
, sh_next
,
4539 next
== MESA_SHADER_FRAGMENT
? num_tfeedback_decls
: 0,
4543 do_dead_builtin_varyings(ctx
, sh_i
, sh_next
,
4544 next
== MESA_SHADER_FRAGMENT
? num_tfeedback_decls
: 0,
4547 /* This must be done after all dead varyings are eliminated. */
4548 if (!check_against_output_limit(ctx
, prog
, sh_i
))
4550 if (!check_against_input_limit(ctx
, prog
, sh_next
))
4556 if (!store_tfeedback_info(ctx
, prog
, num_tfeedback_decls
, tfeedback_decls
))
4559 update_array_sizes(prog
);
4560 link_assign_uniform_locations(prog
, ctx
->Const
.UniformBooleanTrue
);
4561 link_assign_atomic_counter_resources(ctx
, prog
);
4562 store_fragdepth_layout(prog
);
4564 link_calculate_subroutine_compat(prog
);
4565 check_resources(ctx
, prog
);
4566 check_subroutine_resources(prog
);
4567 check_image_resources(ctx
, prog
);
4568 link_check_atomic_counter_resources(ctx
, prog
);
4570 if (!prog
->LinkStatus
)
4573 /* OpenGL ES requires that a vertex shader and a fragment shader both be
4574 * present in a linked program. GL_ARB_ES2_compatibility doesn't say
4575 * anything about shader linking when one of the shaders (vertex or
4576 * fragment shader) is absent. So, the extension shouldn't change the
4577 * behavior specified in GLSL specification.
4579 if (!prog
->SeparateShader
&& ctx
->API
== API_OPENGLES2
) {
4580 /* With ES < 3.1 one needs to have always vertex + fragment shader. */
4581 if (ctx
->Version
< 31) {
4582 if (prog
->_LinkedShaders
[MESA_SHADER_VERTEX
] == NULL
) {
4583 linker_error(prog
, "program lacks a vertex shader\n");
4584 } else if (prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
] == NULL
) {
4585 linker_error(prog
, "program lacks a fragment shader\n");
4588 /* From OpenGL ES 3.1 specification (7.3 Program Objects):
4589 * "Linking can fail for a variety of reasons as specified in the
4590 * OpenGL ES Shading Language Specification, as well as any of the
4591 * following reasons:
4595 * * program contains objects to form either a vertex shader or
4596 * fragment shader, and program is not separable, and does not
4597 * contain objects to form both a vertex shader and fragment
4600 if (!!prog
->_LinkedShaders
[MESA_SHADER_VERTEX
] ^
4601 !!prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
]) {
4602 linker_error(prog
, "Program needs to contain both vertex and "
4603 "fragment shaders.\n");
4608 /* Split BufferInterfaceBlocks into UniformBlocks and ShaderStorageBlocks
4609 * for gl_shader_program and gl_shader, so that drivers that need separate
4610 * index spaces for each set can have that.
4612 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_STAGES
; i
++) {
4613 if (prog
->_LinkedShaders
[i
] != NULL
) {
4614 gl_shader
*sh
= prog
->_LinkedShaders
[i
];
4615 split_ubos_and_ssbos(sh
,
4616 sh
->BufferInterfaceBlocks
,
4617 sh
->NumBufferInterfaceBlocks
,
4619 &sh
->NumUniformBlocks
,
4621 &sh
->ShaderStorageBlocks
,
4622 &sh
->NumShaderStorageBlocks
,
4627 split_ubos_and_ssbos(prog
,
4628 prog
->BufferInterfaceBlocks
,
4629 prog
->NumBufferInterfaceBlocks
,
4630 &prog
->UniformBlocks
,
4631 &prog
->NumUniformBlocks
,
4632 &prog
->UboInterfaceBlockIndex
,
4633 &prog
->ShaderStorageBlocks
,
4634 &prog
->NumShaderStorageBlocks
,
4635 &prog
->SsboInterfaceBlockIndex
);
4637 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4638 if (prog
->_LinkedShaders
[i
] == NULL
)
4641 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerBufferInterfaceBlocks
)
4642 lower_ubo_reference(prog
->_LinkedShaders
[i
]);
4644 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerShaderSharedVariables
)
4645 lower_shared_reference(prog
->_LinkedShaders
[i
],
4646 &prog
->Comp
.SharedSize
);
4648 lower_vector_derefs(prog
->_LinkedShaders
[i
]);
4652 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4653 free(shader_list
[i
]);
4654 if (prog
->_LinkedShaders
[i
] == NULL
)
4657 /* Do a final validation step to make sure that the IR wasn't
4658 * invalidated by any modifications performed after intrastage linking.
4660 validate_ir_tree(prog
->_LinkedShaders
[i
]->ir
);
4662 /* Retain any live IR, but trash the rest. */
4663 reparent_ir(prog
->_LinkedShaders
[i
]->ir
, prog
->_LinkedShaders
[i
]->ir
);
4665 /* The symbol table in the linked shaders may contain references to
4666 * variables that were removed (e.g., unused uniforms). Since it may
4667 * contain junk, there is no possible valid use. Delete it and set the
4670 delete prog
->_LinkedShaders
[i
]->symbols
;
4671 prog
->_LinkedShaders
[i
]->symbols
= NULL
;
4674 ralloc_free(mem_ctx
);