2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
26 * GLSL linker implementation
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
 *   - Undefined references in each shader are resolved to definitions in
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
53 * In the final stage individual shader executables are linked to create a
 *   complete executable.
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
64 * \author Ian Romanick <ian.d.romanick@intel.com>
68 #include "util/strndup.h"
69 #include "main/core.h"
70 #include "glsl_symbol_table.h"
71 #include "glsl_parser_extras.h"
74 #include "program/hash_table.h"
76 #include "link_varyings.h"
77 #include "ir_optimization.h"
78 #include "ir_rvalue_visitor.h"
79 #include "ir_uniform.h"
81 #include "main/shaderobj.h"
82 #include "main/enums.h"
85 void linker_error(gl_shader_program
*, const char *, ...);
90 * Visitor that determines whether or not a variable is ever written.
92 class find_assignment_visitor
: public ir_hierarchical_visitor
{
94 find_assignment_visitor(const char *name
)
95 : name(name
), found(false)
100 virtual ir_visitor_status
visit_enter(ir_assignment
*ir
)
102 ir_variable
*const var
= ir
->lhs
->variable_referenced();
104 if (strcmp(name
, var
->name
) == 0) {
109 return visit_continue_with_parent
;
112 virtual ir_visitor_status
visit_enter(ir_call
*ir
)
114 foreach_two_lists(formal_node
, &ir
->callee
->parameters
,
115 actual_node
, &ir
->actual_parameters
) {
116 ir_rvalue
*param_rval
= (ir_rvalue
*) actual_node
;
117 ir_variable
*sig_param
= (ir_variable
*) formal_node
;
119 if (sig_param
->data
.mode
== ir_var_function_out
||
120 sig_param
->data
.mode
== ir_var_function_inout
) {
121 ir_variable
*var
= param_rval
->variable_referenced();
122 if (var
&& strcmp(name
, var
->name
) == 0) {
129 if (ir
->return_deref
!= NULL
) {
130 ir_variable
*const var
= ir
->return_deref
->variable_referenced();
132 if (strcmp(name
, var
->name
) == 0) {
138 return visit_continue_with_parent
;
141 bool variable_found()
147 const char *name
; /**< Find writes to a variable with this name. */
148 bool found
; /**< Was a write to the variable found? */
153 * Visitor that determines whether or not a variable is ever read.
155 class find_deref_visitor
: public ir_hierarchical_visitor
{
157 find_deref_visitor(const char *name
)
158 : name(name
), found(false)
163 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
165 if (strcmp(this->name
, ir
->var
->name
) == 0) {
170 return visit_continue
;
173 bool variable_found() const
179 const char *name
; /**< Find writes to a variable with this name. */
180 bool found
; /**< Was a write to the variable found? */
184 class geom_array_resize_visitor
: public ir_hierarchical_visitor
{
186 unsigned num_vertices
;
187 gl_shader_program
*prog
;
189 geom_array_resize_visitor(unsigned num_vertices
, gl_shader_program
*prog
)
191 this->num_vertices
= num_vertices
;
195 virtual ~geom_array_resize_visitor()
200 virtual ir_visitor_status
visit(ir_variable
*var
)
202 if (!var
->type
->is_array() || var
->data
.mode
!= ir_var_shader_in
)
203 return visit_continue
;
205 unsigned size
= var
->type
->length
;
207 /* Generate a link error if the shader has declared this array with an
210 if (size
&& size
!= this->num_vertices
) {
211 linker_error(this->prog
, "size of array %s declared as %u, "
212 "but number of input vertices is %u\n",
213 var
->name
, size
, this->num_vertices
);
214 return visit_continue
;
217 /* Generate a link error if the shader attempts to access an input
218 * array using an index too large for its actual size assigned at link
221 if (var
->data
.max_array_access
>= this->num_vertices
) {
222 linker_error(this->prog
, "geometry shader accesses element %i of "
223 "%s, but only %i input vertices\n",
224 var
->data
.max_array_access
, var
->name
, this->num_vertices
);
225 return visit_continue
;
228 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
230 var
->data
.max_array_access
= this->num_vertices
- 1;
232 return visit_continue
;
235 /* Dereferences of input variables need to be updated so that their type
236 * matches the newly assigned type of the variable they are accessing. */
237 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
239 ir
->type
= ir
->var
->type
;
240 return visit_continue
;
243 /* Dereferences of 2D input arrays need to be updated so that their type
244 * matches the newly assigned type of the array they are accessing. */
245 virtual ir_visitor_status
visit_leave(ir_dereference_array
*ir
)
247 const glsl_type
*const vt
= ir
->array
->type
;
249 ir
->type
= vt
->fields
.array
;
250 return visit_continue
;
254 class tess_eval_array_resize_visitor
: public ir_hierarchical_visitor
{
256 unsigned num_vertices
;
257 gl_shader_program
*prog
;
259 tess_eval_array_resize_visitor(unsigned num_vertices
, gl_shader_program
*prog
)
261 this->num_vertices
= num_vertices
;
265 virtual ~tess_eval_array_resize_visitor()
270 virtual ir_visitor_status
visit(ir_variable
*var
)
272 if (!var
->type
->is_array() || var
->data
.mode
!= ir_var_shader_in
|| var
->data
.patch
)
273 return visit_continue
;
275 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
277 var
->data
.max_array_access
= this->num_vertices
- 1;
279 return visit_continue
;
282 /* Dereferences of input variables need to be updated so that their type
283 * matches the newly assigned type of the variable they are accessing. */
284 virtual ir_visitor_status
visit(ir_dereference_variable
*ir
)
286 ir
->type
= ir
->var
->type
;
287 return visit_continue
;
290 /* Dereferences of 2D input arrays need to be updated so that their type
291 * matches the newly assigned type of the array they are accessing. */
292 virtual ir_visitor_status
visit_leave(ir_dereference_array
*ir
)
294 const glsl_type
*const vt
= ir
->array
->type
;
296 ir
->type
= vt
->fields
.array
;
297 return visit_continue
;
301 class barrier_use_visitor
: public ir_hierarchical_visitor
{
303 barrier_use_visitor(gl_shader_program
*prog
)
304 : prog(prog
), in_main(false), after_return(false), control_flow(0)
308 virtual ~barrier_use_visitor()
313 virtual ir_visitor_status
visit_enter(ir_function
*ir
)
315 if (strcmp(ir
->name
, "main") == 0)
318 return visit_continue
;
321 virtual ir_visitor_status
visit_leave(ir_function
*)
324 after_return
= false;
325 return visit_continue
;
328 virtual ir_visitor_status
visit_leave(ir_return
*)
331 return visit_continue
;
334 virtual ir_visitor_status
visit_enter(ir_if
*)
337 return visit_continue
;
340 virtual ir_visitor_status
visit_leave(ir_if
*)
343 return visit_continue
;
346 virtual ir_visitor_status
visit_enter(ir_loop
*)
349 return visit_continue
;
352 virtual ir_visitor_status
visit_leave(ir_loop
*)
355 return visit_continue
;
358 /* FINISHME: `switch` is not expressed at the IR level -- it's already
359 * been lowered to a mess of `if`s. We'll correctly disallow any use of
360 * barrier() in a conditional path within the switch, but not in a path
361 * which is always hit.
364 virtual ir_visitor_status
visit_enter(ir_call
*ir
)
366 if (ir
->use_builtin
&& strcmp(ir
->callee_name(), "barrier") == 0) {
367 /* Use of barrier(); determine if it is legal: */
369 linker_error(prog
, "Builtin barrier() may only be used in main");
374 linker_error(prog
, "Builtin barrier() may not be used after return");
378 if (control_flow
!= 0) {
379 linker_error(prog
, "Builtin barrier() may not be used inside control flow");
383 return visit_continue
;
387 gl_shader_program
*prog
;
388 bool in_main
, after_return
;
393 * Visitor that determines the highest stream id to which a (geometry) shader
394 * emits vertices. It also checks whether End{Stream}Primitive is ever called.
396 class find_emit_vertex_visitor
: public ir_hierarchical_visitor
{
398 find_emit_vertex_visitor(int max_allowed
)
399 : max_stream_allowed(max_allowed
),
400 invalid_stream_id(0),
401 invalid_stream_id_from_emit_vertex(false),
402 end_primitive_found(false),
403 uses_non_zero_stream(false)
408 virtual ir_visitor_status
visit_leave(ir_emit_vertex
*ir
)
410 int stream_id
= ir
->stream_id();
413 invalid_stream_id
= stream_id
;
414 invalid_stream_id_from_emit_vertex
= true;
418 if (stream_id
> max_stream_allowed
) {
419 invalid_stream_id
= stream_id
;
420 invalid_stream_id_from_emit_vertex
= true;
425 uses_non_zero_stream
= true;
427 return visit_continue
;
430 virtual ir_visitor_status
visit_leave(ir_end_primitive
*ir
)
432 end_primitive_found
= true;
434 int stream_id
= ir
->stream_id();
437 invalid_stream_id
= stream_id
;
438 invalid_stream_id_from_emit_vertex
= false;
442 if (stream_id
> max_stream_allowed
) {
443 invalid_stream_id
= stream_id
;
444 invalid_stream_id_from_emit_vertex
= false;
449 uses_non_zero_stream
= true;
451 return visit_continue
;
456 return invalid_stream_id
!= 0;
459 const char *error_func()
461 return invalid_stream_id_from_emit_vertex
?
462 "EmitStreamVertex" : "EndStreamPrimitive";
467 return invalid_stream_id
;
472 return uses_non_zero_stream
;
475 bool uses_end_primitive()
477 return end_primitive_found
;
481 int max_stream_allowed
;
482 int invalid_stream_id
;
483 bool invalid_stream_id_from_emit_vertex
;
484 bool end_primitive_found
;
485 bool uses_non_zero_stream
;
488 /* Class that finds array derefs and check if indexes are dynamic. */
489 class dynamic_sampler_array_indexing_visitor
: public ir_hierarchical_visitor
492 dynamic_sampler_array_indexing_visitor() :
493 dynamic_sampler_array_indexing(false)
497 ir_visitor_status
visit_enter(ir_dereference_array
*ir
)
499 if (!ir
->variable_referenced())
500 return visit_continue
;
502 if (!ir
->variable_referenced()->type
->contains_sampler())
503 return visit_continue
;
505 if (!ir
->array_index
->constant_expression_value()) {
506 dynamic_sampler_array_indexing
= true;
509 return visit_continue
;
512 bool uses_dynamic_sampler_array_indexing()
514 return dynamic_sampler_array_indexing
;
518 bool dynamic_sampler_array_indexing
;
521 } /* anonymous namespace */
524 linker_error(gl_shader_program
*prog
, const char *fmt
, ...)
528 ralloc_strcat(&prog
->InfoLog
, "error: ");
530 ralloc_vasprintf_append(&prog
->InfoLog
, fmt
, ap
);
533 prog
->LinkStatus
= false;
538 linker_warning(gl_shader_program
*prog
, const char *fmt
, ...)
542 ralloc_strcat(&prog
->InfoLog
, "warning: ");
544 ralloc_vasprintf_append(&prog
->InfoLog
, fmt
, ap
);
551 * Given a string identifying a program resource, break it into a base name
552 * and an optional array index in square brackets.
554 * If an array index is present, \c out_base_name_end is set to point to the
555 * "[" that precedes the array index, and the array index itself is returned
558 * If no array index is present (or if the array index is negative or
559 * mal-formed), \c out_base_name_end, is set to point to the null terminator
560 * at the end of the input string, and -1 is returned.
562 * Only the final array index is parsed; if the string contains other array
563 * indices (or structure field accesses), they are left in the base name.
565 * No attempt is made to check that the base name is properly formed;
566 * typically the caller will look up the base name in a hash table, so
567 * ill-formed base names simply turn into hash table lookup failures.
570 parse_program_resource_name(const GLchar
*name
,
571 const GLchar
**out_base_name_end
)
573 /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
575 * "When an integer array element or block instance number is part of
576 * the name string, it will be specified in decimal form without a "+"
577 * or "-" sign or any extra leading zeroes. Additionally, the name
578 * string will not include white space anywhere in the string."
581 const size_t len
= strlen(name
);
582 *out_base_name_end
= name
+ len
;
584 if (len
== 0 || name
[len
-1] != ']')
587 /* Walk backwards over the string looking for a non-digit character. This
588 * had better be the opening bracket for an array index.
590 * Initially, i specifies the location of the ']'. Since the string may
591 * contain only the ']' charcater, walk backwards very carefully.
594 for (i
= len
- 1; (i
> 0) && isdigit(name
[i
-1]); --i
)
597 if ((i
== 0) || name
[i
-1] != '[')
600 long array_index
= strtol(&name
[i
], NULL
, 10);
604 /* Check for leading zero */
605 if (name
[i
] == '0' && name
[i
+1] != ']')
608 *out_base_name_end
= name
+ (i
- 1);
614 link_invalidate_variable_locations(exec_list
*ir
)
616 foreach_in_list(ir_instruction
, node
, ir
) {
617 ir_variable
*const var
= node
->as_variable();
622 /* Only assign locations for variables that lack an explicit location.
623 * Explicit locations are set for all built-in variables, generic vertex
624 * shader inputs (via layout(location=...)), and generic fragment shader
625 * outputs (also via layout(location=...)).
627 if (!var
->data
.explicit_location
) {
628 var
->data
.location
= -1;
629 var
->data
.location_frac
= 0;
632 /* ir_variable::is_unmatched_generic_inout is used by the linker while
633 * connecting outputs from one stage to inputs of the next stage.
635 if (var
->data
.explicit_location
&&
636 var
->data
.location
< VARYING_SLOT_VAR0
) {
637 var
->data
.is_unmatched_generic_inout
= 0;
639 var
->data
.is_unmatched_generic_inout
= 1;
646 * Set clip_distance_array_size based on the given shader.
648 * Also check for errors based on incorrect usage of gl_ClipVertex and
651 * Return false if an error was reported.
654 analyze_clip_usage(struct gl_shader_program
*prog
,
655 struct gl_shader
*shader
,
656 GLuint
*clip_distance_array_size
)
658 *clip_distance_array_size
= 0;
660 if (!prog
->IsES
&& prog
->Version
>= 130) {
661 /* From section 7.1 (Vertex Shader Special Variables) of the
664 * "It is an error for a shader to statically write both
665 * gl_ClipVertex and gl_ClipDistance."
667 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
668 * gl_ClipVertex nor gl_ClipDistance.
670 find_assignment_visitor
clip_vertex("gl_ClipVertex");
671 find_assignment_visitor
clip_distance("gl_ClipDistance");
673 clip_vertex
.run(shader
->ir
);
674 clip_distance
.run(shader
->ir
);
675 if (clip_vertex
.variable_found() && clip_distance
.variable_found()) {
676 linker_error(prog
, "%s shader writes to both `gl_ClipVertex' "
677 "and `gl_ClipDistance'\n",
678 _mesa_shader_stage_to_string(shader
->Stage
));
682 if (clip_distance
.variable_found()) {
683 ir_variable
*clip_distance_var
=
684 shader
->symbols
->get_variable("gl_ClipDistance");
686 assert(clip_distance_var
);
687 *clip_distance_array_size
= clip_distance_var
->type
->length
;
694 * Verify that a vertex shader executable meets all semantic requirements.
696 * Also sets prog->Vert.ClipDistanceArraySize as a side effect.
698 * \param shader Vertex shader executable to be verified
701 validate_vertex_shader_executable(struct gl_shader_program
*prog
,
702 struct gl_shader
*shader
)
707 /* From the GLSL 1.10 spec, page 48:
709 * "The variable gl_Position is available only in the vertex
710 * language and is intended for writing the homogeneous vertex
711 * position. All executions of a well-formed vertex shader
712 * executable must write a value into this variable. [...] The
713 * variable gl_Position is available only in the vertex
714 * language and is intended for writing the homogeneous vertex
715 * position. All executions of a well-formed vertex shader
716 * executable must write a value into this variable."
718 * while in GLSL 1.40 this text is changed to:
720 * "The variable gl_Position is available only in the vertex
721 * language and is intended for writing the homogeneous vertex
722 * position. It can be written at any time during shader
723 * execution. It may also be read back by a vertex shader
724 * after being written. This value will be used by primitive
725 * assembly, clipping, culling, and other fixed functionality
726 * operations, if present, that operate on primitives after
727 * vertex processing has occurred. Its value is undefined if
728 * the vertex shader executable does not write gl_Position."
730 * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
731 * gl_Position is not an error.
733 if (prog
->Version
< (prog
->IsES
? 300 : 140)) {
734 find_assignment_visitor
find("gl_Position");
735 find
.run(shader
->ir
);
736 if (!find
.variable_found()) {
739 "vertex shader does not write to `gl_Position'."
740 "It's value is undefined. \n");
743 "vertex shader does not write to `gl_Position'. \n");
749 analyze_clip_usage(prog
, shader
, &prog
->Vert
.ClipDistanceArraySize
);
753 validate_tess_eval_shader_executable(struct gl_shader_program
*prog
,
754 struct gl_shader
*shader
)
759 analyze_clip_usage(prog
, shader
, &prog
->TessEval
.ClipDistanceArraySize
);
764 * Verify that a fragment shader executable meets all semantic requirements
766 * \param shader Fragment shader executable to be verified
769 validate_fragment_shader_executable(struct gl_shader_program
*prog
,
770 struct gl_shader
*shader
)
775 find_assignment_visitor
frag_color("gl_FragColor");
776 find_assignment_visitor
frag_data("gl_FragData");
778 frag_color
.run(shader
->ir
);
779 frag_data
.run(shader
->ir
);
781 if (frag_color
.variable_found() && frag_data
.variable_found()) {
782 linker_error(prog
, "fragment shader writes to both "
783 "`gl_FragColor' and `gl_FragData'\n");
788 * Verify that a geometry shader executable meets all semantic requirements
790 * Also sets prog->Geom.VerticesIn, and prog->Geom.ClipDistanceArraySize as
793 * \param shader Geometry shader executable to be verified
796 validate_geometry_shader_executable(struct gl_shader_program
*prog
,
797 struct gl_shader
*shader
)
802 unsigned num_vertices
= vertices_per_prim(prog
->Geom
.InputType
);
803 prog
->Geom
.VerticesIn
= num_vertices
;
805 analyze_clip_usage(prog
, shader
, &prog
->Geom
.ClipDistanceArraySize
);
809 * Check if geometry shaders emit to non-zero streams and do corresponding
813 validate_geometry_shader_emissions(struct gl_context
*ctx
,
814 struct gl_shader_program
*prog
)
816 if (prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
] != NULL
) {
817 find_emit_vertex_visitor
emit_vertex(ctx
->Const
.MaxVertexStreams
- 1);
818 emit_vertex
.run(prog
->_LinkedShaders
[MESA_SHADER_GEOMETRY
]->ir
);
819 if (emit_vertex
.error()) {
820 linker_error(prog
, "Invalid call %s(%d). Accepted values for the "
821 "stream parameter are in the range [0, %d].\n",
822 emit_vertex
.error_func(),
823 emit_vertex
.error_stream(),
824 ctx
->Const
.MaxVertexStreams
- 1);
826 prog
->Geom
.UsesStreams
= emit_vertex
.uses_streams();
827 prog
->Geom
.UsesEndPrimitive
= emit_vertex
.uses_end_primitive();
829 /* From the ARB_gpu_shader5 spec:
831 * "Multiple vertex streams are supported only if the output primitive
832 * type is declared to be "points". A program will fail to link if it
833 * contains a geometry shader calling EmitStreamVertex() or
834 * EndStreamPrimitive() if its output primitive type is not "points".
836 * However, in the same spec:
838 * "The function EmitVertex() is equivalent to calling EmitStreamVertex()
839 * with <stream> set to zero."
843 * "The function EndPrimitive() is equivalent to calling
844 * EndStreamPrimitive() with <stream> set to zero."
846 * Since we can call EmitVertex() and EndPrimitive() when we output
847 * primitives other than points, calling EmitStreamVertex(0) or
848 * EmitEndPrimitive(0) should not produce errors. This it also what Nvidia
849 * does. Currently we only set prog->Geom.UsesStreams to TRUE when
850 * EmitStreamVertex() or EmitEndPrimitive() are called with a non-zero
853 if (prog
->Geom
.UsesStreams
&& prog
->Geom
.OutputType
!= GL_POINTS
) {
854 linker_error(prog
, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
855 "with n>0 requires point output\n");
861 validate_intrastage_arrays(struct gl_shader_program
*prog
,
862 ir_variable
*const var
,
863 ir_variable
*const existing
)
865 /* Consider the types to be "the same" if both types are arrays
866 * of the same type and one of the arrays is implicitly sized.
867 * In addition, set the type of the linked variable to the
868 * explicitly sized array.
870 if (var
->type
->is_array() && existing
->type
->is_array()) {
871 if ((var
->type
->fields
.array
== existing
->type
->fields
.array
) &&
872 ((var
->type
->length
== 0)|| (existing
->type
->length
== 0))) {
873 if (var
->type
->length
!= 0) {
874 if (var
->type
->length
<= existing
->data
.max_array_access
) {
875 linker_error(prog
, "%s `%s' declared as type "
876 "`%s' but outermost dimension has an index"
879 var
->name
, var
->type
->name
,
880 existing
->data
.max_array_access
);
882 existing
->type
= var
->type
;
884 } else if (existing
->type
->length
!= 0) {
885 if(existing
->type
->length
<= var
->data
.max_array_access
&&
886 !existing
->data
.from_ssbo_unsized_array
) {
887 linker_error(prog
, "%s `%s' declared as type "
888 "`%s' but outermost dimension has an index"
891 var
->name
, existing
->type
->name
,
892 var
->data
.max_array_access
);
897 /* The arrays of structs could have different glsl_type pointers but
898 * they are actually the same type. Use record_compare() to check that.
900 if (existing
->type
->fields
.array
->is_record() &&
901 var
->type
->fields
.array
->is_record() &&
902 existing
->type
->fields
.array
->record_compare(var
->type
->fields
.array
))
911 * Perform validation of global variables used across multiple shaders
914 cross_validate_globals(struct gl_shader_program
*prog
,
915 struct gl_shader
**shader_list
,
916 unsigned num_shaders
,
919 /* Examine all of the uniforms in all of the shaders and cross validate
922 glsl_symbol_table variables
;
923 for (unsigned i
= 0; i
< num_shaders
; i
++) {
924 if (shader_list
[i
] == NULL
)
927 foreach_in_list(ir_instruction
, node
, shader_list
[i
]->ir
) {
928 ir_variable
*const var
= node
->as_variable();
933 if (uniforms_only
&& (var
->data
.mode
!= ir_var_uniform
&& var
->data
.mode
!= ir_var_shader_storage
))
936 /* don't cross validate subroutine uniforms */
937 if (var
->type
->contains_subroutine())
940 /* Don't cross validate temporaries that are at global scope. These
941 * will eventually get pulled into the shaders 'main'.
943 if (var
->data
.mode
== ir_var_temporary
)
946 /* If a global with this name has already been seen, verify that the
947 * new instance has the same type. In addition, if the globals have
948 * initializers, the values of the initializers must be the same.
950 ir_variable
*const existing
= variables
.get_variable(var
->name
);
951 if (existing
!= NULL
) {
952 /* Check if types match. Interface blocks have some special
953 * rules so we handle those elsewhere.
955 if (var
->type
!= existing
->type
&&
956 !var
->is_interface_instance()) {
957 if (!validate_intrastage_arrays(prog
, var
, existing
)) {
958 if (var
->type
->is_record() && existing
->type
->is_record()
959 && existing
->type
->record_compare(var
->type
)) {
960 existing
->type
= var
->type
;
962 /* If it is an unsized array in a Shader Storage Block,
963 * two different shaders can access to different elements.
964 * Because of that, they might be converted to different
965 * sized arrays, then check that they are compatible but
966 * ignore the array size.
968 if (!(var
->data
.mode
== ir_var_shader_storage
&&
969 var
->data
.from_ssbo_unsized_array
&&
970 existing
->data
.mode
== ir_var_shader_storage
&&
971 existing
->data
.from_ssbo_unsized_array
&&
972 var
->type
->gl_type
== existing
->type
->gl_type
)) {
973 linker_error(prog
, "%s `%s' declared as type "
974 "`%s' and type `%s'\n",
976 var
->name
, var
->type
->name
,
977 existing
->type
->name
);
984 if (var
->data
.explicit_location
) {
985 if (existing
->data
.explicit_location
986 && (var
->data
.location
!= existing
->data
.location
)) {
987 linker_error(prog
, "explicit locations for %s "
988 "`%s' have differing values\n",
989 mode_string(var
), var
->name
);
993 existing
->data
.location
= var
->data
.location
;
994 existing
->data
.explicit_location
= true;
996 /* Check if uniform with implicit location was marked explicit
997 * by earlier shader stage. If so, mark it explicit in this stage
998 * too to make sure later processing does not treat it as
1001 if (existing
->data
.explicit_location
) {
1002 var
->data
.location
= existing
->data
.location
;
1003 var
->data
.explicit_location
= true;
1007 /* From the GLSL 4.20 specification:
1008 * "A link error will result if two compilation units in a program
1009 * specify different integer-constant bindings for the same
1010 * opaque-uniform name. However, it is not an error to specify a
1011 * binding on some but not all declarations for the same name"
1013 if (var
->data
.explicit_binding
) {
1014 if (existing
->data
.explicit_binding
&&
1015 var
->data
.binding
!= existing
->data
.binding
) {
1016 linker_error(prog
, "explicit bindings for %s "
1017 "`%s' have differing values\n",
1018 mode_string(var
), var
->name
);
1022 existing
->data
.binding
= var
->data
.binding
;
1023 existing
->data
.explicit_binding
= true;
1026 if (var
->type
->contains_atomic() &&
1027 var
->data
.offset
!= existing
->data
.offset
) {
1028 linker_error(prog
, "offset specifications for %s "
1029 "`%s' have differing values\n",
1030 mode_string(var
), var
->name
);
1034 /* Validate layout qualifiers for gl_FragDepth.
1036 * From the AMD/ARB_conservative_depth specs:
1038 * "If gl_FragDepth is redeclared in any fragment shader in a
1039 * program, it must be redeclared in all fragment shaders in
1040 * that program that have static assignments to
1041 * gl_FragDepth. All redeclarations of gl_FragDepth in all
1042 * fragment shaders in a single program must have the same set
1045 if (strcmp(var
->name
, "gl_FragDepth") == 0) {
1046 bool layout_declared
= var
->data
.depth_layout
!= ir_depth_layout_none
;
1047 bool layout_differs
=
1048 var
->data
.depth_layout
!= existing
->data
.depth_layout
;
1050 if (layout_declared
&& layout_differs
) {
1052 "All redeclarations of gl_FragDepth in all "
1053 "fragment shaders in a single program must have "
1054 "the same set of qualifiers.\n");
1057 if (var
->data
.used
&& layout_differs
) {
1059 "If gl_FragDepth is redeclared with a layout "
1060 "qualifier in any fragment shader, it must be "
1061 "redeclared with the same layout qualifier in "
1062 "all fragment shaders that have assignments to "
1067 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
1069 * "If a shared global has multiple initializers, the
1070 * initializers must all be constant expressions, and they
1071 * must all have the same value. Otherwise, a link error will
1072 * result. (A shared global having only one initializer does
1073 * not require that initializer to be a constant expression.)"
1075 * Previous to 4.20 the GLSL spec simply said that initializers
1076 * must have the same value. In this case of non-constant
1077 * initializers, this was impossible to determine. As a result,
1078 * no vendor actually implemented that behavior. The 4.20
1079 * behavior matches the implemented behavior of at least one other
1080 * vendor, so we'll implement that for all GLSL versions.
1082 if (var
->constant_initializer
!= NULL
) {
1083 if (existing
->constant_initializer
!= NULL
) {
1084 if (!var
->constant_initializer
->has_value(existing
->constant_initializer
)) {
1085 linker_error(prog
, "initializers for %s "
1086 "`%s' have differing values\n",
1087 mode_string(var
), var
->name
);
1091 /* If the first-seen instance of a particular uniform did not
1092 * have an initializer but a later instance does, copy the
1093 * initializer to the version stored in the symbol table.
1095 /* FINISHME: This is wrong. The constant_value field should
1096 * FINISHME: not be modified! Imagine a case where a shader
1097 * FINISHME: without an initializer is linked in two different
1098 * FINISHME: programs with shaders that have differing
1099 * FINISHME: initializers. Linking with the first will
1100 * FINISHME: modify the shader, and linking with the second
1101 * FINISHME: will fail.
1103 existing
->constant_initializer
=
1104 var
->constant_initializer
->clone(ralloc_parent(existing
),
1109 if (var
->data
.has_initializer
) {
1110 if (existing
->data
.has_initializer
1111 && (var
->constant_initializer
== NULL
1112 || existing
->constant_initializer
== NULL
)) {
1114 "shared global variable `%s' has multiple "
1115 "non-constant initializers.\n",
1120 /* Some instance had an initializer, so keep track of that. In
1121 * this location, all sorts of initializers (constant or
1122 * otherwise) will propagate the existence to the variable
1123 * stored in the symbol table.
1125 existing
->data
.has_initializer
= true;
1128 if (existing
->data
.invariant
!= var
->data
.invariant
) {
1129 linker_error(prog
, "declarations for %s `%s' have "
1130 "mismatching invariant qualifiers\n",
1131 mode_string(var
), var
->name
);
1134 if (existing
->data
.centroid
!= var
->data
.centroid
) {
1135 linker_error(prog
, "declarations for %s `%s' have "
1136 "mismatching centroid qualifiers\n",
1137 mode_string(var
), var
->name
);
1140 if (existing
->data
.sample
!= var
->data
.sample
) {
1141 linker_error(prog
, "declarations for %s `%s` have "
1142 "mismatching sample qualifiers\n",
1143 mode_string(var
), var
->name
);
1146 if (existing
->data
.image_format
!= var
->data
.image_format
) {
1147 linker_error(prog
, "declarations for %s `%s` have "
1148 "mismatching image format qualifiers\n",
1149 mode_string(var
), var
->name
);
1153 variables
.add_variable(var
);
/**
 * Perform validation of uniforms used across multiple shader stages
 *
 * Thin wrapper around cross_validate_globals() run over the post-link
 * per-stage shaders (prog->_LinkedShaders).  The final 'true' selects
 * uniforms-only validation inside cross_validate_globals().
 */
void
cross_validate_uniforms(struct gl_shader_program *prog)
{
   cross_validate_globals(prog, prog->_LinkedShaders,
                          MESA_SHADER_STAGES, true);
}
/**
 * Accumulates the array of prog->BufferInterfaceBlocks and checks that all
 * definitions of blocks agree on their contents.
 *
 * Also builds prog->InterfaceBlockStageIndex[stage][block]: for each stage,
 * the index of the block within that stage's shader, or -1 if the stage does
 * not use the block.
 *
 * \return false (after raising a linker error) if two stages define the same
 *         named block with mismatching contents, true otherwise.
 */
static bool
interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
{
   /* Upper bound on the number of distinct blocks: sum over all stages. */
   unsigned max_num_uniform_blocks = 0;
   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i])
         max_num_uniform_blocks += prog->_LinkedShaders[i]->NumBufferInterfaceBlocks;
   }

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      struct gl_shader *sh = prog->_LinkedShaders[i];

      /* Allocate the per-stage index table even for unused stages so that
       * every entry reads as "block not used" (-1) by default.
       */
      prog->InterfaceBlockStageIndex[i] = ralloc_array(prog, int,
                                                       max_num_uniform_blocks);
      for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
         prog->InterfaceBlockStageIndex[i][j] = -1;

      if (sh == NULL)
         continue;

      for (unsigned int j = 0; j < sh->NumBufferInterfaceBlocks; j++) {
         /* Merges the stage's block into the program-wide list, growing it
          * if the block is new; returns -1 on a definition mismatch.
          */
         int index = link_cross_validate_uniform_block(prog,
                                                       &prog->BufferInterfaceBlocks,
                                                       &prog->NumBufferInterfaceBlocks,
                                                       &sh->BufferInterfaceBlocks[j]);

         if (index == -1) {
            linker_error(prog, "uniform block `%s' has mismatching definitions\n",
                         sh->BufferInterfaceBlocks[j].Name);
            return false;
         }

         prog->InterfaceBlockStageIndex[i][index] = j;
      }
   }

   return true;
}
1214 * Populates a shaders symbol table with all global declarations
1217 populate_symbol_table(gl_shader
*sh
)
1219 sh
->symbols
= new(sh
) glsl_symbol_table
;
1221 foreach_in_list(ir_instruction
, inst
, sh
->ir
) {
1225 if ((func
= inst
->as_function()) != NULL
) {
1226 sh
->symbols
->add_function(func
);
1227 } else if ((var
= inst
->as_variable()) != NULL
) {
1228 if (var
->data
.mode
!= ir_var_temporary
)
1229 sh
->symbols
->add_variable(var
);
/**
 * Remap variables referenced in an instruction tree
 *
 * This is used when instruction trees are cloned from one shader and placed in
 * another.  These trees will contain references to \c ir_variable nodes that
 * do not exist in the target shader.  This function finds these \c ir_variable
 * references and replaces the references with matching variables in the target
 * shader.
 *
 * If there is no matching variable in the target shader, a clone of the
 * \c ir_variable is made and added to the target shader.  The new variable is
 * added to \b both the instruction stream and the symbol table.
 *
 * \param inst         IR tree that is to be processed.
 * \param symbols      Symbol table containing global scope symbols in the
 *                     linked shader.
 * \param instructions Instruction stream where new variable declarations
 *                     should be added.
 */
void
remap_variables(ir_instruction *inst, struct gl_shader *target,
                hash_table *temps)
{
   class remap_visitor : public ir_hierarchical_visitor {
   public:
      remap_visitor(struct gl_shader *target,
                    hash_table *temps)
      {
         this->target = target;
         this->symbols = target->symbols;
         this->instructions = target->ir;
         this->temps = temps;
      }

      virtual ir_visitor_status visit(ir_dereference_variable *ir)
      {
         /* Temporaries are never global; they must have been cloned already
          * and recorded in the temps map by move_non_declarations().
          */
         if (ir->var->data.mode == ir_var_temporary) {
            ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);

            assert(var != NULL);
            ir->var = var;
            return visit_continue;
         }

         /* For globals: reuse the target shader's existing declaration if
          * one with the same name exists; otherwise clone the variable into
          * the target's IR stream and symbol table.
          */
         ir_variable *const existing =
            this->symbols->get_variable(ir->var->name);
         if (existing != NULL)
            ir->var = existing;
         else {
            ir_variable *copy = ir->var->clone(this->target, NULL);

            this->symbols->add_variable(copy);
            this->instructions->push_head(copy);
            ir->var = copy;
         }

         return visit_continue;
      }

   private:
      struct gl_shader *target;
      glsl_symbol_table *symbols;
      exec_list *instructions;
      hash_table *temps;
   };

   remap_visitor v(target, temps);

   inst->accept(&v);
}
/**
 * Move non-declarations from one instruction stream to another
 *
 * The intended usage pattern of this function is to pass the pointer to the
 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
 * pointer) for \c last and \c false for \c make_copies on the first
 * call.  Successive calls pass the return value of the previous call for
 * \c last and \c true for \c make_copies.
 *
 * \param instructions Source instruction stream
 * \param last         Instruction after which new instructions should be
 *                     inserted in the target instruction stream
 * \param make_copies  Flag selecting whether instructions in \c instructions
 *                     should be copied (via \c ir_instruction::clone) into the
 *                     target list or moved.
 *
 * \return
 * The new "last" instruction in the target instruction stream.  This pointer
 * is suitable for use as the \c last parameter of a later call to this
 * function.
 */
exec_node *
move_non_declarations(exec_list *instructions, exec_node *last,
                      bool make_copies, gl_shader *target)
{
   hash_table *temps = NULL;

   if (make_copies)
      temps = hash_table_ctor(0, hash_table_pointer_hash,
                              hash_table_pointer_compare);

   foreach_in_list_safe(ir_instruction, inst, instructions) {
      /* Function declarations/definitions stay at global scope. */
      if (inst->as_function())
         continue;

      /* Non-temporary variable declarations also stay at global scope. */
      ir_variable *var = inst->as_variable();
      if ((var != NULL) && (var->data.mode != ir_var_temporary))
         continue;

      /* Only executable statements (and the temporaries backing global
       * initializers) are expected to remain at this point.
       */
      assert(inst->as_assignment()
             || inst->as_call()
             || inst->as_if() /* for initializers with the ?: operator */
             || ((var != NULL) && (var->data.mode == ir_var_temporary)));

      if (make_copies) {
         inst = inst->clone(target, NULL);

         /* Record cloned temporaries so later clones can be remapped to
          * them; remap everything else immediately.
          */
         if (var != NULL)
            hash_table_insert(temps, inst, var);
         else
            remap_variables(inst, target, temps);
      } else {
         inst->remove();
      }

      last->insert_after(inst);
      last = inst;
   }

   if (make_copies)
      hash_table_dtor(temps);

   return last;
}
/**
 * This class is only used in link_intrastage_shaders() below but declaring
 * it inside that function leads to compiler warnings with some versions of
 * gcc.
 *
 * Visitor that replaces unsized arrays (and unsized arrays inside interface
 * blocks) with sized arrays whose size is inferred from max_array_access.
 */
class array_sizing_visitor : public ir_hierarchical_visitor {
public:
   array_sizing_visitor()
      : mem_ctx(ralloc_context(NULL)),
        unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
                                           hash_table_pointer_compare))
   {
   }

   ~array_sizing_visitor()
   {
      hash_table_dtor(this->unnamed_interfaces);
      ralloc_free(this->mem_ctx);
   }

   virtual ir_visitor_status visit(ir_variable *var)
   {
      const glsl_type *type_without_array;
      /* First fix the variable's own type if it is an unsized array
       * (SSBO unsized trailing arrays are intentionally left alone).
       */
      fixup_type(&var->type, var->data.max_array_access,
                 var->data.from_ssbo_unsized_array);
      type_without_array = var->type->without_array();
      if (var->type->is_interface()) {
         /* Named interface block instance. */
         if (interface_contains_unsized_arrays(var->type)) {
            const glsl_type *new_type =
               resize_interface_members(var->type,
                                        var->get_max_ifc_array_access(),
                                        var->is_in_shader_storage_block());
            var->type = new_type;
            var->change_interface_type(new_type);
         }
      } else if (type_without_array->is_interface()) {
         /* Array (possibly multi-dimensional) of interface block instances. */
         if (interface_contains_unsized_arrays(type_without_array)) {
            const glsl_type *new_type =
               resize_interface_members(type_without_array,
                                        var->get_max_ifc_array_access(),
                                        var->is_in_shader_storage_block());
            var->change_interface_type(new_type);
            var->type = update_interface_members_array(var->type, new_type);
         }
      } else if (const glsl_type *ifc_type = var->get_interface_type()) {
         /* Store a pointer to the variable in the unnamed_interfaces
          * hashtable; members of an unnamed block are separate ir_variables
          * and must be fixed up together in fixup_unnamed_interface_types().
          */
         ir_variable **interface_vars = (ir_variable **)
            hash_table_find(this->unnamed_interfaces, ifc_type);
         if (interface_vars == NULL) {
            interface_vars = rzalloc_array(mem_ctx, ir_variable *,
                                           ifc_type->length);
            hash_table_insert(this->unnamed_interfaces, interface_vars,
                              ifc_type);
         }
         unsigned index = ifc_type->field_index(var->name);
         assert(index < ifc_type->length);
         assert(interface_vars[index] == NULL);
         interface_vars[index] = var;
      }
      return visit_continue;
   }

   /**
    * For each unnamed interface block that was discovered while running the
    * visitor, adjust the interface type to reflect the newly assigned array
    * sizes, and fix up the ir_variable nodes to point to the new interface
    * type.
    */
   void fixup_unnamed_interface_types()
   {
      hash_table_call_foreach(this->unnamed_interfaces,
                              fixup_unnamed_interface_type, NULL);
   }

private:
   /**
    * If the type pointed to by \c type represents an unsized array, replace
    * it with a sized array whose size is determined by max_array_access.
    */
   static void fixup_type(const glsl_type **type, unsigned max_array_access,
                          bool from_ssbo_unsized_array)
   {
      if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
         *type = glsl_type::get_array_instance((*type)->fields.array,
                                               max_array_access + 1);
         assert(*type != NULL);
      }
   }

   /**
    * Rebuild an array-of-interface type (recursing through nested array
    * dimensions) so its element type is \c new_interface_type.
    */
   static const glsl_type *
   update_interface_members_array(const glsl_type *type,
                                  const glsl_type *new_interface_type)
   {
      const glsl_type *element_type = type->fields.array;
      if (element_type->is_array()) {
         const glsl_type *new_array_type =
            update_interface_members_array(element_type, new_interface_type);
         return glsl_type::get_array_instance(new_array_type, type->length);
      } else {
         return glsl_type::get_array_instance(new_interface_type,
                                              type->length);
      }
   }

   /**
    * Determine whether the given interface type contains unsized arrays (if
    * it doesn't, array_sizing_visitor doesn't need to process it).
    */
   static bool interface_contains_unsized_arrays(const glsl_type *type)
   {
      for (unsigned i = 0; i < type->length; i++) {
         const glsl_type *elem_type = type->fields.structure[i].type;
         if (elem_type->is_unsized_array())
            return true;
      }
      return false;
   }

   /**
    * Create a new interface type based on the given type, with unsized arrays
    * replaced by sized arrays whose size is determined by
    * max_ifc_array_access.
    */
   static const glsl_type *
   resize_interface_members(const glsl_type *type,
                            const unsigned *max_ifc_array_access,
                            bool is_ssbo)
   {
      unsigned num_fields = type->length;
      glsl_struct_field *fields = new glsl_struct_field[num_fields];
      memcpy(fields, type->fields.structure,
             num_fields * sizeof(*fields));
      for (unsigned i = 0; i < num_fields; i++) {
         /* If SSBO last member is unsized array, we don't replace it by a sized
          * array.
          */
         if (is_ssbo && i == (num_fields - 1))
            fixup_type(&fields[i].type, max_ifc_array_access[i],
                       true);
         else
            fixup_type(&fields[i].type, max_ifc_array_access[i],
                       false);
      }
      glsl_interface_packing packing =
         (glsl_interface_packing) type->interface_packing;
      const glsl_type *new_ifc_type =
         glsl_type::get_interface_instance(fields, num_fields,
                                           packing, type->name);
      delete [] fields;
      return new_ifc_type;
   }

   /**
    * Callback for hash_table_call_foreach(): rebuild one unnamed interface
    * block's type from the (possibly resized) member variable types and point
    * all its member ir_variables at the new interface type.
    */
   static void fixup_unnamed_interface_type(const void *key, void *data,
                                            void *)
   {
      const glsl_type *ifc_type = (const glsl_type *) key;
      ir_variable **interface_vars = (ir_variable **) data;
      unsigned num_fields = ifc_type->length;
      glsl_struct_field *fields = new glsl_struct_field[num_fields];
      memcpy(fields, ifc_type->fields.structure,
             num_fields * sizeof(*fields));
      bool interface_type_changed = false;
      for (unsigned i = 0; i < num_fields; i++) {
         if (interface_vars[i] != NULL &&
             fields[i].type != interface_vars[i]->type) {
            fields[i].type = interface_vars[i]->type;
            interface_type_changed = true;
         }
      }
      if (!interface_type_changed) {
         delete [] fields;
         return;
      }
      glsl_interface_packing packing =
         (glsl_interface_packing) ifc_type->interface_packing;
      const glsl_type *new_ifc_type =
         glsl_type::get_interface_instance(fields, num_fields, packing,
                                           ifc_type->name);
      delete [] fields;
      for (unsigned i = 0; i < num_fields; i++) {
         if (interface_vars[i] != NULL)
            interface_vars[i]->change_interface_type(new_ifc_type);
      }
   }

   /**
    * Memory context used to allocate the data in \c unnamed_interfaces.
    */
   void *mem_ctx;

   /**
    * Hash table from const glsl_type * to an array of ir_variable *'s
    * pointing to the ir_variables constituting each unnamed interface block.
    */
   hash_table *unnamed_interfaces;
};
/**
 * Performs the cross-validation of tessellation control shader vertices and
 * layout qualifiers for the attached tessellation control shaders,
 * and propagates them to the linked TCS and linked shader program.
 */
static void
link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
                               struct gl_shader *linked_shader,
                               struct gl_shader **shader_list,
                               unsigned num_shaders)
{
   linked_shader->TessCtrl.VerticesOut = 0;

   /* This function is called for every stage; only TCS is affected. */
   if (linked_shader->Stage != MESA_SHADER_TESS_CTRL)
      return;

   /* From the GLSL 4.0 spec (chapter 4.3.8.2):
    *
    *     "All tessellation control shader layout declarations in a program
    *      must specify the same output patch vertex count.  There must be at
    *      least one layout qualifier specifying an output patch vertex count
    *      in any program containing tessellation control shaders; however,
    *      such a declaration is not required in all tessellation control
    *      shaders."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      /* 0 means "not declared in this compilation unit". */
      if (shader->TessCtrl.VerticesOut != 0) {
         if (linked_shader->TessCtrl.VerticesOut != 0 &&
             linked_shader->TessCtrl.VerticesOut != shader->TessCtrl.VerticesOut) {
            linker_error(prog, "tessellation control shader defined with "
                         "conflicting output vertex count (%d and %d)\n",
                         linked_shader->TessCtrl.VerticesOut,
                         shader->TessCtrl.VerticesOut);
            return;
         }
         linked_shader->TessCtrl.VerticesOut = shader->TessCtrl.VerticesOut;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->TessCtrl.VerticesOut == 0) {
      linker_error(prog, "tessellation control shader didn't declare "
                   "vertices out layout qualifier\n");
      return;
   }
   prog->TessCtrl.VerticesOut = linked_shader->TessCtrl.VerticesOut;
}
/**
 * Performs the cross-validation of tessellation evaluation shader
 * primitive type, vertex spacing, ordering and point_mode layout qualifiers
 * for the attached tessellation evaluation shaders, and propagates them
 * to the linked TES and linked shader program.
 */
static void
link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
                              struct gl_shader *linked_shader,
                              struct gl_shader **shader_list,
                              unsigned num_shaders)
{
   /* Sentinel "not declared" values for each qualifier. */
   linked_shader->TessEval.PrimitiveMode = PRIM_UNKNOWN;
   linked_shader->TessEval.Spacing = 0;
   linked_shader->TessEval.VertexOrder = 0;
   linked_shader->TessEval.PointMode = -1;

   /* This function is called for every stage; only TES is affected. */
   if (linked_shader->Stage != MESA_SHADER_TESS_EVAL)
      return;

   /* From the GLSL 4.0 spec (chapter 4.3.8.1):
    *
    *     "At least one tessellation evaluation shader (compilation unit) in
    *      a program must declare a primitive mode in its input layout.
    *      Declaration vertex spacing, ordering, and point mode identifiers is
    *      optional.  It is not required that all tessellation evaluation
    *      shaders in a program declare a primitive mode.  If spacing or
    *      vertex ordering declarations are omitted, the tessellation
    *      primitive generator will use equal spacing or counter-clockwise
    *      vertex ordering, respectively.  If a point mode declaration is
    *      omitted, the tessellation primitive generator will produce lines or
    *      triangles according to the primitive mode."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      if (shader->TessEval.PrimitiveMode != PRIM_UNKNOWN) {
         if (linked_shader->TessEval.PrimitiveMode != PRIM_UNKNOWN &&
             linked_shader->TessEval.PrimitiveMode != shader->TessEval.PrimitiveMode) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting input primitive modes.\n");
            return;
         }
         linked_shader->TessEval.PrimitiveMode = shader->TessEval.PrimitiveMode;
      }

      if (shader->TessEval.Spacing != 0) {
         if (linked_shader->TessEval.Spacing != 0 &&
             linked_shader->TessEval.Spacing != shader->TessEval.Spacing) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting vertex spacing.\n");
            return;
         }
         linked_shader->TessEval.Spacing = shader->TessEval.Spacing;
      }

      if (shader->TessEval.VertexOrder != 0) {
         if (linked_shader->TessEval.VertexOrder != 0 &&
             linked_shader->TessEval.VertexOrder != shader->TessEval.VertexOrder) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting ordering.\n");
            return;
         }
         linked_shader->TessEval.VertexOrder = shader->TessEval.VertexOrder;
      }

      if (shader->TessEval.PointMode != -1) {
         if (linked_shader->TessEval.PointMode != -1 &&
             linked_shader->TessEval.PointMode != shader->TessEval.PointMode) {
            linker_error(prog, "tessellation evaluation shader defined with "
                         "conflicting point modes.\n");
            return;
         }
         linked_shader->TessEval.PointMode = shader->TessEval.PointMode;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->TessEval.PrimitiveMode == PRIM_UNKNOWN) {
      linker_error(prog,
                   "tessellation evaluation shader didn't declare input "
                   "primitive modes.\n");
      return;
   }
   prog->TessEval.PrimitiveMode = linked_shader->TessEval.PrimitiveMode;

   /* Apply the spec-mandated defaults for the optional qualifiers. */
   if (linked_shader->TessEval.Spacing == 0)
      linked_shader->TessEval.Spacing = GL_EQUAL;
   prog->TessEval.Spacing = linked_shader->TessEval.Spacing;

   if (linked_shader->TessEval.VertexOrder == 0)
      linked_shader->TessEval.VertexOrder = GL_CCW;
   prog->TessEval.VertexOrder = linked_shader->TessEval.VertexOrder;

   if (linked_shader->TessEval.PointMode == -1)
      linked_shader->TessEval.PointMode = GL_FALSE;
   prog->TessEval.PointMode = linked_shader->TessEval.PointMode;
}
/**
 * Performs the cross-validation of layout qualifiers specified in
 * redeclaration of gl_FragCoord for the attached fragment shaders,
 * and propagates them to the linked FS and linked shader program.
 */
static void
link_fs_input_layout_qualifiers(struct gl_shader_program *prog,
                                struct gl_shader *linked_shader,
                                struct gl_shader **shader_list,
                                unsigned num_shaders)
{
   linked_shader->redeclares_gl_fragcoord = false;
   linked_shader->uses_gl_fragcoord = false;
   linked_shader->origin_upper_left = false;
   linked_shader->pixel_center_integer = false;

   /* gl_FragCoord redeclaration requires GLSL 1.50+ or the
    * ARB_fragment_coord_conventions extension; other stages are unaffected.
    */
   if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
       (prog->Version < 150 && !prog->ARB_fragment_coord_conventions_enable))
      return;

   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];
      /* From the GLSL 1.50 spec, page 39:
       *
       *   "If gl_FragCoord is redeclared in any fragment shader in a program,
       *    it must be redeclared in all the fragment shaders in that program
       *    that have a static use gl_FragCoord."
       */
      if ((linked_shader->redeclares_gl_fragcoord
           && !shader->redeclares_gl_fragcoord
           && shader->uses_gl_fragcoord)
          || (shader->redeclares_gl_fragcoord
              && !linked_shader->redeclares_gl_fragcoord
              && linked_shader->uses_gl_fragcoord)) {
         linker_error(prog, "fragment shader defined with conflicting "
                      "layout qualifiers for gl_FragCoord\n");
      }

      /* From the GLSL 1.50 spec, page 39:
       *
       *   "All redeclarations of gl_FragCoord in all fragment shaders in a
       *    single program must have the same set of qualifiers."
       */
      if (linked_shader->redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord
          && (shader->origin_upper_left != linked_shader->origin_upper_left
              || shader->pixel_center_integer != linked_shader->pixel_center_integer)) {
         linker_error(prog, "fragment shader defined with conflicting "
                      "layout qualifiers for gl_FragCoord\n");
      }

      /* Update the linked shader state.  Note that uses_gl_fragcoord should
       * accumulate the results.  The other values should replace.  If there
       * are multiple redeclarations, all the fields except uses_gl_fragcoord
       * are already known to be the same.
       */
      if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
         linked_shader->redeclares_gl_fragcoord =
            shader->redeclares_gl_fragcoord;
         linked_shader->uses_gl_fragcoord = linked_shader->uses_gl_fragcoord
            || shader->uses_gl_fragcoord;
         linked_shader->origin_upper_left = shader->origin_upper_left;
         linked_shader->pixel_center_integer = shader->pixel_center_integer;
      }

      /* Early fragment tests is sticky: set if any compilation unit set it. */
      linked_shader->EarlyFragmentTests |= shader->EarlyFragmentTests;
   }
}
/**
 * Performs the cross-validation of geometry shader max_vertices and
 * primitive type layout qualifiers for the attached geometry shaders,
 * and propagates them to the linked GS and linked shader program.
 */
static void
link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
                                struct gl_shader *linked_shader,
                                struct gl_shader **shader_list,
                                unsigned num_shaders)
{
   /* Sentinel "not declared" values for each qualifier. */
   linked_shader->Geom.VerticesOut = 0;
   linked_shader->Geom.Invocations = 0;
   linked_shader->Geom.InputType = PRIM_UNKNOWN;
   linked_shader->Geom.OutputType = PRIM_UNKNOWN;

   /* No in/out qualifiers defined for anything but GLSL 1.50+
    * geometry shaders so far.
    */
   if (linked_shader->Stage != MESA_SHADER_GEOMETRY || prog->Version < 150)
      return;

   /* From the GLSL 1.50 spec, page 46:
    *
    *     "All geometry shader output layout declarations in a program
    *      must declare the same layout and same value for
    *      max_vertices.  There must be at least one geometry output
    *      layout declaration somewhere in a program, but not all
    *      geometry shaders (compilation units) are required to
    *      declare it."
    */
   for (unsigned i = 0; i < num_shaders; i++) {
      struct gl_shader *shader = shader_list[i];

      if (shader->Geom.InputType != PRIM_UNKNOWN) {
         if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
             linked_shader->Geom.InputType != shader->Geom.InputType) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "input types\n");
            return;
         }
         linked_shader->Geom.InputType = shader->Geom.InputType;
      }

      if (shader->Geom.OutputType != PRIM_UNKNOWN) {
         if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
             linked_shader->Geom.OutputType != shader->Geom.OutputType) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "output types\n");
            return;
         }
         linked_shader->Geom.OutputType = shader->Geom.OutputType;
      }

      if (shader->Geom.VerticesOut != 0) {
         if (linked_shader->Geom.VerticesOut != 0 &&
             linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "output vertex count (%d and %d)\n",
                         linked_shader->Geom.VerticesOut,
                         shader->Geom.VerticesOut);
            return;
         }
         linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
      }

      if (shader->Geom.Invocations != 0) {
         if (linked_shader->Geom.Invocations != 0 &&
             linked_shader->Geom.Invocations != shader->Geom.Invocations) {
            linker_error(prog, "geometry shader defined with conflicting "
                         "invocation count (%d and %d)\n",
                         linked_shader->Geom.Invocations,
                         shader->Geom.Invocations);
            return;
         }
         linked_shader->Geom.Invocations = shader->Geom.Invocations;
      }
   }

   /* Just do the intrastage -> interstage propagation right now,
    * since we already know we're in the right type of shader program
    * for doing it.
    */
   if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
      linker_error(prog,
                   "geometry shader didn't declare primitive input type\n");
      return;
   }
   prog->Geom.InputType = linked_shader->Geom.InputType;

   if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
      linker_error(prog,
                   "geometry shader didn't declare primitive output type\n");
      return;
   }
   prog->Geom.OutputType = linked_shader->Geom.OutputType;

   if (linked_shader->Geom.VerticesOut == 0) {
      linker_error(prog,
                   "geometry shader didn't declare max_vertices\n");
      return;
   }
   prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;

   /* Invocations defaults to 1 when not declared. */
   if (linked_shader->Geom.Invocations == 0)
      linked_shader->Geom.Invocations = 1;

   prog->Geom.Invocations = linked_shader->Geom.Invocations;
}
1914 * Perform cross-validation of compute shader local_size_{x,y,z} layout
1915 * qualifiers for the attached compute shaders, and propagate them to the
1916 * linked CS and linked shader program.
1919 link_cs_input_layout_qualifiers(struct gl_shader_program
*prog
,
1920 struct gl_shader
*linked_shader
,
1921 struct gl_shader
**shader_list
,
1922 unsigned num_shaders
)
1924 for (int i
= 0; i
< 3; i
++)
1925 linked_shader
->Comp
.LocalSize
[i
] = 0;
1927 /* This function is called for all shader stages, but it only has an effect
1928 * for compute shaders.
1930 if (linked_shader
->Stage
!= MESA_SHADER_COMPUTE
)
1933 /* From the ARB_compute_shader spec, in the section describing local size
1936 * If multiple compute shaders attached to a single program object
1937 * declare local work-group size, the declarations must be identical;
1938 * otherwise a link-time error results. Furthermore, if a program
1939 * object contains any compute shaders, at least one must contain an
1940 * input layout qualifier specifying the local work sizes of the
1941 * program, or a link-time error will occur.
1943 for (unsigned sh
= 0; sh
< num_shaders
; sh
++) {
1944 struct gl_shader
*shader
= shader_list
[sh
];
1946 if (shader
->Comp
.LocalSize
[0] != 0) {
1947 if (linked_shader
->Comp
.LocalSize
[0] != 0) {
1948 for (int i
= 0; i
< 3; i
++) {
1949 if (linked_shader
->Comp
.LocalSize
[i
] !=
1950 shader
->Comp
.LocalSize
[i
]) {
1951 linker_error(prog
, "compute shader defined with conflicting "
1957 for (int i
= 0; i
< 3; i
++)
1958 linked_shader
->Comp
.LocalSize
[i
] = shader
->Comp
.LocalSize
[i
];
1962 /* Just do the intrastage -> interstage propagation right now,
1963 * since we already know we're in the right type of shader program
1966 if (linked_shader
->Comp
.LocalSize
[0] == 0) {
1967 linker_error(prog
, "compute shader didn't declare local size\n");
1970 for (int i
= 0; i
< 3; i
++)
1971 prog
->Comp
.LocalSize
[i
] = linked_shader
->Comp
.LocalSize
[i
];
/**
 * Combine a group of shaders for a single stage to generate a linked shader
 *
 * \note
 * If this function is supplied a single shader, it is cloned, and the new
 * shader is returned.
 *
 * \return the newly linked gl_shader, or NULL on link failure (a linker
 *         error will have been recorded on \c prog).
 */
static struct gl_shader *
link_intrastage_shaders(void *mem_ctx,
                        struct gl_context *ctx,
                        struct gl_shader_program *prog,
                        struct gl_shader **shader_list,
                        unsigned num_shaders)
{
   struct gl_uniform_block *uniform_blocks = NULL;

   /* Check that global variables defined in multiple shaders are consistent.
    */
   cross_validate_globals(prog, shader_list, num_shaders, false);
   if (!prog->LinkStatus)
      return NULL;

   /* Check that interface blocks defined in multiple shaders are consistent.
    */
   validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
                                        num_shaders);
   if (!prog->LinkStatus)
      return NULL;

   /* Link up uniform blocks defined within this stage. */
   const unsigned num_uniform_blocks =
      link_uniform_blocks(mem_ctx, ctx, prog, shader_list, num_shaders,
                          &uniform_blocks);
   if (!prog->LinkStatus)
      return NULL;

   /* Check that there is only a single definition of each function signature
    * across all shaders.
    */
   for (unsigned i = 0; i < (num_shaders - 1); i++) {
      foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
         ir_function *const f = node->as_function();

         if (f == NULL)
            continue;

         for (unsigned j = i + 1; j < num_shaders; j++) {
            ir_function *const other =
               shader_list[j]->symbols->get_function(f->name);

            /* If the other shader has no function (and therefore no function
             * signatures) with the same name, skip to the next shader.
             */
            if (other == NULL)
               continue;

            foreach_in_list(ir_function_signature, sig, &f->signatures) {
               if (!sig->is_defined || sig->is_builtin())
                  continue;

               ir_function_signature *other_sig =
                  other->exact_matching_signature(NULL, &sig->parameters);

               if ((other_sig != NULL) && other_sig->is_defined
                   && !other_sig->is_builtin()) {
                  linker_error(prog, "function `%s' is multiply defined\n",
                               f->name);
                  return NULL;
               }
            }
         }
      }
   }

   /* Find the shader that defines main, and make a clone of it.
    *
    * Starting with the clone, search for undefined references.  If one is
    * found, find the shader that defines it.  Clone the reference and add
    * it to the shader.  Repeat until there are no undefined references or
    * until a reference cannot be resolved.
    */
   gl_shader *main = NULL;
   for (unsigned i = 0; i < num_shaders; i++) {
      if (_mesa_get_main_function_signature(shader_list[i]) != NULL) {
         main = shader_list[i];
         break;
      }
   }

   if (main == NULL) {
      linker_error(prog, "%s shader lacks `main'\n",
                   _mesa_shader_stage_to_string(shader_list[0]->Stage));
      return NULL;
   }

   gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
   linked->ir = new(linked) exec_list;
   clone_ir_list(mem_ctx, linked->ir, main->ir);

   /* Transfer ownership of the per-stage uniform blocks to the new shader. */
   linked->BufferInterfaceBlocks = uniform_blocks;
   linked->NumBufferInterfaceBlocks = num_uniform_blocks;
   ralloc_steal(linked, linked->BufferInterfaceBlocks);

   /* Validate and propagate per-stage layout qualifiers; each helper is a
    * no-op for stages it does not apply to.
    */
   link_fs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_tcs_out_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_tes_in_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
   link_cs_input_layout_qualifiers(prog, linked, shader_list, num_shaders);

   populate_symbol_table(linked);

   /* The pointer to the main function in the final linked shader (i.e., the
    * copy of the original shader that contained the main function).
    */
   ir_function_signature *const main_sig =
      _mesa_get_main_function_signature(linked);

   /* Move any instructions other than variable declarations or function
    * declarations into main.
    */
   exec_node *insertion_point =
      move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
                            linked);

   for (unsigned i = 0; i < num_shaders; i++) {
      if (shader_list[i] == main)
         continue;

      insertion_point = move_non_declarations(shader_list[i]->ir,
                                              insertion_point, true, linked);
   }

   /* Check if any shader needs built-in functions. */
   bool need_builtins = false;
   for (unsigned i = 0; i < num_shaders; i++) {
      if (shader_list[i]->uses_builtin_functions) {
         need_builtins = true;
         break;
      }
   }

   bool ok;
   if (need_builtins) {
      /* Make a temporary array one larger than shader_list, which will hold
       * the built-in function shader as well.
       */
      gl_shader **linking_shaders = (gl_shader **)
         calloc(num_shaders + 1, sizeof(gl_shader *));

      ok = linking_shaders != NULL;

      if (ok) {
         memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *));
         linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader();

         ok = link_function_calls(prog, linked, linking_shaders,
                                  num_shaders + 1);

         free(linking_shaders);
      } else {
         _mesa_error_no_memory(__func__);
      }
   } else {
      ok = link_function_calls(prog, linked, shader_list, num_shaders);
   }

   if (!ok) {
      _mesa_delete_shader(ctx, linked);
      return NULL;
   }

   /* At this point linked should contain all of the linked IR, so
    * validate it to make sure nothing went wrong.
    */
   validate_ir_tree(linked->ir);

   /* Set the size of geometry shader input arrays */
   if (linked->Stage == MESA_SHADER_GEOMETRY) {
      unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
      geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
      foreach_in_list(ir_instruction, ir, linked->ir) {
         ir->accept(&input_resize_visitor);
      }
   }

   if (ctx->Const.VertexID_is_zero_based)
      lower_vertex_id(linked);

   /* Validate correct usage of barrier() in the tess control shader */
   if (linked->Stage == MESA_SHADER_TESS_CTRL) {
      barrier_use_visitor visitor(prog);
      foreach_in_list(ir_instruction, ir, linked->ir) {
         ir->accept(&visitor);
      }
   }

   /* Make a pass over all variable declarations to ensure that arrays with
    * unspecified sizes have a size specified.  The size is inferred from the
    * max_array_access field.
    */
   array_sizing_visitor v;
   v.run(linked->ir);
   v.fixup_unnamed_interface_types();

   return linked;
}
2183 * Update the sizes of linked shader uniform arrays to the maximum
2186 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
2188 * If one or more elements of an array are active,
2189 * GetActiveUniform will return the name of the array in name,
2190 * subject to the restrictions listed above. The type of the array
2191 * is returned in type. The size parameter contains the highest
2192 * array element index used, plus one. The compiler or linker
2193 * determines the highest index used. There will be only one
2194 * active uniform reported by the GL per uniform array.
2198 update_array_sizes(struct gl_shader_program
*prog
)
2200 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2201 if (prog
->_LinkedShaders
[i
] == NULL
)
2204 foreach_in_list(ir_instruction
, node
, prog
->_LinkedShaders
[i
]->ir
) {
2205 ir_variable
*const var
= node
->as_variable();
2207 if ((var
== NULL
) || (var
->data
.mode
!= ir_var_uniform
) ||
2208 !var
->type
->is_array())
2211 /* GL_ARB_uniform_buffer_object says that std140 uniforms
2212 * will not be eliminated. Since we always do std140, just
2213 * don't resize arrays in UBOs.
2215 * Atomic counters are supposed to get deterministic
2216 * locations assigned based on the declaration ordering and
2217 * sizes, array compaction would mess that up.
2219 * Subroutine uniforms are not removed.
2221 if (var
->is_in_buffer_block() || var
->type
->contains_atomic() ||
2222 var
->type
->contains_subroutine())
2225 unsigned int size
= var
->data
.max_array_access
;
2226 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
2227 if (prog
->_LinkedShaders
[j
] == NULL
)
2230 foreach_in_list(ir_instruction
, node2
, prog
->_LinkedShaders
[j
]->ir
) {
2231 ir_variable
*other_var
= node2
->as_variable();
2235 if (strcmp(var
->name
, other_var
->name
) == 0 &&
2236 other_var
->data
.max_array_access
> size
) {
2237 size
= other_var
->data
.max_array_access
;
2242 if (size
+ 1 != var
->type
->length
) {
2243 /* If this is a built-in uniform (i.e., it's backed by some
2244 * fixed-function state), adjust the number of state slots to
2245 * match the new array size. The number of slots per array entry
2246 * is not known. It seems safe to assume that the total number of
2247 * slots is an integer multiple of the number of array elements.
2248 * Determine the number of slots per array element by dividing by
2249 * the old (total) size.
2251 const unsigned num_slots
= var
->get_num_state_slots();
2252 if (num_slots
> 0) {
2253 var
->set_num_state_slots((size
+ 1)
2254 * (num_slots
/ var
->type
->length
));
2257 var
->type
= glsl_type::get_array_instance(var
->type
->fields
.array
,
2259 /* FINISHME: We should update the types of array
2260 * dereferences of this variable now.
2268 * Resize tessellation evaluation per-vertex inputs to the size of
2269 * tessellation control per-vertex outputs.
2272 resize_tes_inputs(struct gl_context
*ctx
,
2273 struct gl_shader_program
*prog
)
2275 if (prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
] == NULL
)
2278 gl_shader
*const tcs
= prog
->_LinkedShaders
[MESA_SHADER_TESS_CTRL
];
2279 gl_shader
*const tes
= prog
->_LinkedShaders
[MESA_SHADER_TESS_EVAL
];
2281 /* If no control shader is present, then the TES inputs are statically
2282 * sized to MaxPatchVertices; the actual size of the arrays won't be
2283 * known until draw time.
2285 const int num_vertices
= tcs
2286 ? tcs
->TessCtrl
.VerticesOut
2287 : ctx
->Const
.MaxPatchVertices
;
2289 tess_eval_array_resize_visitor
input_resize_visitor(num_vertices
, prog
);
2290 foreach_in_list(ir_instruction
, ir
, tes
->ir
) {
2291 ir
->accept(&input_resize_visitor
);
2295 /* Convert the gl_PatchVerticesIn system value into a constant, since
2296 * the value is known at this point.
2298 foreach_in_list(ir_instruction
, ir
, tes
->ir
) {
2299 ir_variable
*var
= ir
->as_variable();
2300 if (var
&& var
->data
.mode
== ir_var_system_value
&&
2301 var
->data
.location
== SYSTEM_VALUE_VERTICES_IN
) {
2302 void *mem_ctx
= ralloc_parent(var
);
2303 var
->data
.mode
= ir_var_auto
;
2304 var
->data
.location
= 0;
2305 var
->constant_value
= new(mem_ctx
) ir_constant(num_vertices
);
2312 * Find a contiguous set of available bits in a bitmask.
2314 * \param used_mask Bits representing used (1) and unused (0) locations
2315 * \param needed_count Number of contiguous bits needed.
2318 * Base location of the available bits on success or -1 on failure.
2321 find_available_slots(unsigned used_mask
, unsigned needed_count
)
2323 unsigned needed_mask
= (1 << needed_count
) - 1;
2324 const int max_bit_to_test
= (8 * sizeof(used_mask
)) - needed_count
;
2326 /* The comparison to 32 is redundant, but without it GCC emits "warning:
2327 * cannot optimize possibly infinite loops" for the loop below.
2329 if ((needed_count
== 0) || (max_bit_to_test
< 0) || (max_bit_to_test
> 32))
2332 for (int i
= 0; i
<= max_bit_to_test
; i
++) {
2333 if ((needed_mask
& ~used_mask
) == needed_mask
)
2344 * Assign locations for either VS inputs or FS outputs
2346 * \param prog Shader program whose variables need locations assigned
2347 * \param constants Driver specific constant values for the program.
2348 * \param target_index Selector for the program target to receive location
2349 * assignmnets. Must be either \c MESA_SHADER_VERTEX or
2350 * \c MESA_SHADER_FRAGMENT.
2353 * If locations are successfully assigned, true is returned. Otherwise an
2354 * error is emitted to the shader link log and false is returned.
2357 assign_attribute_or_color_locations(gl_shader_program
*prog
,
2358 struct gl_constants
*constants
,
2359 unsigned target_index
)
2361 /* Maximum number of generic locations. This corresponds to either the
2362 * maximum number of draw buffers or the maximum number of generic
2365 unsigned max_index
= (target_index
== MESA_SHADER_VERTEX
) ?
2366 constants
->Program
[target_index
].MaxAttribs
:
2367 MAX2(constants
->MaxDrawBuffers
, constants
->MaxDualSourceDrawBuffers
);
2369 /* Mark invalid locations as being used.
2371 unsigned used_locations
= (max_index
>= 32)
2372 ? ~0 : ~((1 << max_index
) - 1);
2373 unsigned double_storage_locations
= 0;
2375 assert((target_index
== MESA_SHADER_VERTEX
)
2376 || (target_index
== MESA_SHADER_FRAGMENT
));
2378 gl_shader
*const sh
= prog
->_LinkedShaders
[target_index
];
2382 /* Operate in a total of four passes.
2384 * 1. Invalidate the location assignments for all vertex shader inputs.
2386 * 2. Assign locations for inputs that have user-defined (via
2387 * glBindVertexAttribLocation) locations and outputs that have
2388 * user-defined locations (via glBindFragDataLocation).
2390 * 3. Sort the attributes without assigned locations by number of slots
2391 * required in decreasing order. Fragmentation caused by attribute
2392 * locations assigned by the application may prevent large attributes
2393 * from having enough contiguous space.
2395 * 4. Assign locations to any inputs without assigned locations.
2398 const int generic_base
= (target_index
== MESA_SHADER_VERTEX
)
2399 ? (int) VERT_ATTRIB_GENERIC0
: (int) FRAG_RESULT_DATA0
;
2401 const enum ir_variable_mode direction
=
2402 (target_index
== MESA_SHADER_VERTEX
)
2403 ? ir_var_shader_in
: ir_var_shader_out
;
2406 /* Temporary storage for the set of attributes that need locations assigned.
2412 /* Used below in the call to qsort. */
2413 static int compare(const void *a
, const void *b
)
2415 const temp_attr
*const l
= (const temp_attr
*) a
;
2416 const temp_attr
*const r
= (const temp_attr
*) b
;
2418 /* Reversed because we want a descending order sort below. */
2419 return r
->slots
- l
->slots
;
2423 unsigned num_attr
= 0;
2425 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
2426 ir_variable
*const var
= node
->as_variable();
2428 if ((var
== NULL
) || (var
->data
.mode
!= (unsigned) direction
))
2431 if (var
->data
.explicit_location
) {
2432 var
->data
.is_unmatched_generic_inout
= 0;
2433 if ((var
->data
.location
>= (int)(max_index
+ generic_base
))
2434 || (var
->data
.location
< 0)) {
2436 "invalid explicit location %d specified for `%s'\n",
2437 (var
->data
.location
< 0)
2438 ? var
->data
.location
2439 : var
->data
.location
- generic_base
,
2443 } else if (target_index
== MESA_SHADER_VERTEX
) {
2446 if (prog
->AttributeBindings
->get(binding
, var
->name
)) {
2447 assert(binding
>= VERT_ATTRIB_GENERIC0
);
2448 var
->data
.location
= binding
;
2449 var
->data
.is_unmatched_generic_inout
= 0;
2451 } else if (target_index
== MESA_SHADER_FRAGMENT
) {
2455 if (prog
->FragDataBindings
->get(binding
, var
->name
)) {
2456 assert(binding
>= FRAG_RESULT_DATA0
);
2457 var
->data
.location
= binding
;
2458 var
->data
.is_unmatched_generic_inout
= 0;
2460 if (prog
->FragDataIndexBindings
->get(index
, var
->name
)) {
2461 var
->data
.index
= index
;
2466 /* From GL4.5 core spec, section 15.2 (Shader Execution):
2468 * "Output binding assignments will cause LinkProgram to fail:
2470 * If the program has an active output assigned to a location greater
2471 * than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
2472 * an active output assigned an index greater than or equal to one;"
2474 if (target_index
== MESA_SHADER_FRAGMENT
&& var
->data
.index
>= 1 &&
2475 var
->data
.location
- generic_base
>=
2476 (int) constants
->MaxDualSourceDrawBuffers
) {
2478 "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
2479 "with index %u for %s\n",
2480 var
->data
.location
- generic_base
, var
->data
.index
,
2485 const unsigned slots
= var
->type
->count_attribute_slots(target_index
== MESA_SHADER_VERTEX
? true : false);
2487 /* If the variable is not a built-in and has a location statically
2488 * assigned in the shader (presumably via a layout qualifier), make sure
2489 * that it doesn't collide with other assigned locations. Otherwise,
2490 * add it to the list of variables that need linker-assigned locations.
2492 if (var
->data
.location
!= -1) {
2493 if (var
->data
.location
>= generic_base
&& var
->data
.index
< 1) {
2494 /* From page 61 of the OpenGL 4.0 spec:
2496 * "LinkProgram will fail if the attribute bindings assigned
2497 * by BindAttribLocation do not leave not enough space to
2498 * assign a location for an active matrix attribute or an
2499 * active attribute array, both of which require multiple
2500 * contiguous generic attributes."
2502 * I think above text prohibits the aliasing of explicit and
2503 * automatic assignments. But, aliasing is allowed in manual
2504 * assignments of attribute locations. See below comments for
2507 * From OpenGL 4.0 spec, page 61:
2509 * "It is possible for an application to bind more than one
2510 * attribute name to the same location. This is referred to as
2511 * aliasing. This will only work if only one of the aliased
2512 * attributes is active in the executable program, or if no
2513 * path through the shader consumes more than one attribute of
2514 * a set of attributes aliased to the same location. A link
2515 * error can occur if the linker determines that every path
2516 * through the shader consumes multiple aliased attributes,
2517 * but implementations are not required to generate an error
2520 * From GLSL 4.30 spec, page 54:
2522 * "A program will fail to link if any two non-vertex shader
2523 * input variables are assigned to the same location. For
2524 * vertex shaders, multiple input variables may be assigned
2525 * to the same location using either layout qualifiers or via
2526 * the OpenGL API. However, such aliasing is intended only to
2527 * support vertex shaders where each execution path accesses
2528 * at most one input per each location. Implementations are
2529 * permitted, but not required, to generate link-time errors
2530 * if they detect that every path through the vertex shader
2531 * executable accesses multiple inputs assigned to any single
2532 * location. For all shader types, a program will fail to link
2533 * if explicit location assignments leave the linker unable
2534 * to find space for other variables without explicit
2537 * From OpenGL ES 3.0 spec, page 56:
2539 * "Binding more than one attribute name to the same location
2540 * is referred to as aliasing, and is not permitted in OpenGL
2541 * ES Shading Language 3.00 vertex shaders. LinkProgram will
2542 * fail when this condition exists. However, aliasing is
2543 * possible in OpenGL ES Shading Language 1.00 vertex shaders.
2544 * This will only work if only one of the aliased attributes
2545 * is active in the executable program, or if no path through
2546 * the shader consumes more than one attribute of a set of
2547 * attributes aliased to the same location. A link error can
2548 * occur if the linker determines that every path through the
2549 * shader consumes multiple aliased attributes, but implemen-
2550 * tations are not required to generate an error in this case."
2552 * After looking at above references from OpenGL, OpenGL ES and
2553 * GLSL specifications, we allow aliasing of vertex input variables
2554 * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
2556 * NOTE: This is not required by the spec but its worth mentioning
2557 * here that we're not doing anything to make sure that no path
2558 * through the vertex shader executable accesses multiple inputs
2559 * assigned to any single location.
2562 /* Mask representing the contiguous slots that will be used by
2565 const unsigned attr
= var
->data
.location
- generic_base
;
2566 const unsigned use_mask
= (1 << slots
) - 1;
2567 const char *const string
= (target_index
== MESA_SHADER_VERTEX
)
2568 ? "vertex shader input" : "fragment shader output";
2570 /* Generate a link error if the requested locations for this
2571 * attribute exceed the maximum allowed attribute location.
2573 if (attr
+ slots
> max_index
) {
2575 "insufficient contiguous locations "
2576 "available for %s `%s' %d %d %d\n", string
,
2577 var
->name
, used_locations
, use_mask
, attr
);
2581 /* Generate a link error if the set of bits requested for this
2582 * attribute overlaps any previously allocated bits.
2584 if ((~(use_mask
<< attr
) & used_locations
) != used_locations
) {
2585 if (target_index
== MESA_SHADER_FRAGMENT
||
2586 (prog
->IsES
&& prog
->Version
>= 300)) {
2588 "overlapping location is assigned "
2589 "to %s `%s' %d %d %d\n", string
,
2590 var
->name
, used_locations
, use_mask
, attr
);
2593 linker_warning(prog
,
2594 "overlapping location is assigned "
2595 "to %s `%s' %d %d %d\n", string
,
2596 var
->name
, used_locations
, use_mask
, attr
);
2600 used_locations
|= (use_mask
<< attr
);
2602 /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
2604 * "A program with more than the value of MAX_VERTEX_ATTRIBS
2605 * active attribute variables may fail to link, unless
2606 * device-dependent optimizations are able to make the program
2607 * fit within available hardware resources. For the purposes
2608 * of this test, attribute variables of the type dvec3, dvec4,
2609 * dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
2610 * count as consuming twice as many attributes as equivalent
2611 * single-precision types. While these types use the same number
2612 * of generic attributes as their single-precision equivalents,
2613 * implementations are permitted to consume two single-precision
2614 * vectors of internal storage for each three- or four-component
2615 * double-precision vector."
2617 * Mark this attribute slot as taking up twice as much space
2618 * so we can count it properly against limits. According to
2619 * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
2620 * is optional behavior, but it seems preferable.
2622 if (var
->type
->without_array()->is_dual_slot_double())
2623 double_storage_locations
|= (use_mask
<< attr
);
2629 to_assign
[num_attr
].slots
= slots
;
2630 to_assign
[num_attr
].var
= var
;
2634 if (target_index
== MESA_SHADER_VERTEX
) {
2635 unsigned total_attribs_size
=
2636 _mesa_bitcount(used_locations
& ((1 << max_index
) - 1)) +
2637 _mesa_bitcount(double_storage_locations
);
2638 if (total_attribs_size
> max_index
) {
2640 "attempt to use %d vertex attribute slots only %d available ",
2641 total_attribs_size
, max_index
);
2646 /* If all of the attributes were assigned locations by the application (or
2647 * are built-in attributes with fixed locations), return early. This should
2648 * be the common case.
2653 qsort(to_assign
, num_attr
, sizeof(to_assign
[0]), temp_attr::compare
);
2655 if (target_index
== MESA_SHADER_VERTEX
) {
2656 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
2657 * only be explicitly assigned by via glBindAttribLocation. Mark it as
2658 * reserved to prevent it from being automatically allocated below.
2660 find_deref_visitor
find("gl_Vertex");
2662 if (find
.variable_found())
2663 used_locations
|= (1 << 0);
2666 for (unsigned i
= 0; i
< num_attr
; i
++) {
2667 /* Mask representing the contiguous slots that will be used by this
2670 const unsigned use_mask
= (1 << to_assign
[i
].slots
) - 1;
2672 int location
= find_available_slots(used_locations
, to_assign
[i
].slots
);
2675 const char *const string
= (target_index
== MESA_SHADER_VERTEX
)
2676 ? "vertex shader input" : "fragment shader output";
2679 "insufficient contiguous locations "
2680 "available for %s `%s'\n",
2681 string
, to_assign
[i
].var
->name
);
2685 to_assign
[i
].var
->data
.location
= generic_base
+ location
;
2686 to_assign
[i
].var
->data
.is_unmatched_generic_inout
= 0;
2687 used_locations
|= (use_mask
<< location
);
2694 * Match explicit locations of outputs to inputs and deactivate the
2695 * unmatch flag if found so we don't optimise them away.
2698 match_explicit_outputs_to_inputs(struct gl_shader_program
*prog
,
2699 gl_shader
*producer
,
2700 gl_shader
*consumer
)
2702 glsl_symbol_table parameters
;
2703 ir_variable
*explicit_locations
[MAX_VARYING
] = { NULL
};
2705 /* Find all shader outputs in the "producer" stage.
2707 foreach_in_list(ir_instruction
, node
, producer
->ir
) {
2708 ir_variable
*const var
= node
->as_variable();
2710 if ((var
== NULL
) || (var
->data
.mode
!= ir_var_shader_out
))
2713 if (var
->data
.explicit_location
&&
2714 var
->data
.location
>= VARYING_SLOT_VAR0
) {
2715 const unsigned idx
= var
->data
.location
- VARYING_SLOT_VAR0
;
2716 if (explicit_locations
[idx
] == NULL
)
2717 explicit_locations
[idx
] = var
;
2721 /* Match inputs to outputs */
2722 foreach_in_list(ir_instruction
, node
, consumer
->ir
) {
2723 ir_variable
*const input
= node
->as_variable();
2725 if ((input
== NULL
) || (input
->data
.mode
!= ir_var_shader_in
))
2728 ir_variable
*output
= NULL
;
2729 if (input
->data
.explicit_location
2730 && input
->data
.location
>= VARYING_SLOT_VAR0
) {
2731 output
= explicit_locations
[input
->data
.location
- VARYING_SLOT_VAR0
];
2733 if (output
!= NULL
){
2734 input
->data
.is_unmatched_generic_inout
= 0;
2735 output
->data
.is_unmatched_generic_inout
= 0;
2742 * Store the gl_FragDepth layout in the gl_shader_program struct.
2745 store_fragdepth_layout(struct gl_shader_program
*prog
)
2747 if (prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
] == NULL
) {
2751 struct exec_list
*ir
= prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
]->ir
;
2753 /* We don't look up the gl_FragDepth symbol directly because if
2754 * gl_FragDepth is not used in the shader, it's removed from the IR.
2755 * However, the symbol won't be removed from the symbol table.
2757 * We're only interested in the cases where the variable is NOT removed
2760 foreach_in_list(ir_instruction
, node
, ir
) {
2761 ir_variable
*const var
= node
->as_variable();
2763 if (var
== NULL
|| var
->data
.mode
!= ir_var_shader_out
) {
2767 if (strcmp(var
->name
, "gl_FragDepth") == 0) {
2768 switch (var
->data
.depth_layout
) {
2769 case ir_depth_layout_none
:
2770 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_NONE
;
2772 case ir_depth_layout_any
:
2773 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_ANY
;
2775 case ir_depth_layout_greater
:
2776 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_GREATER
;
2778 case ir_depth_layout_less
:
2779 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_LESS
;
2781 case ir_depth_layout_unchanged
:
2782 prog
->FragDepthLayout
= FRAG_DEPTH_LAYOUT_UNCHANGED
;
2793 * Validate the resources used by a program versus the implementation limits
2796 check_resources(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
2798 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2799 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2804 if (sh
->num_samplers
> ctx
->Const
.Program
[i
].MaxTextureImageUnits
) {
2805 linker_error(prog
, "Too many %s shader texture samplers\n",
2806 _mesa_shader_stage_to_string(i
));
2809 if (sh
->num_uniform_components
>
2810 ctx
->Const
.Program
[i
].MaxUniformComponents
) {
2811 if (ctx
->Const
.GLSLSkipStrictMaxUniformLimitCheck
) {
2812 linker_warning(prog
, "Too many %s shader default uniform block "
2813 "components, but the driver will try to optimize "
2814 "them out; this is non-portable out-of-spec "
2816 _mesa_shader_stage_to_string(i
));
2818 linker_error(prog
, "Too many %s shader default uniform block "
2820 _mesa_shader_stage_to_string(i
));
2824 if (sh
->num_combined_uniform_components
>
2825 ctx
->Const
.Program
[i
].MaxCombinedUniformComponents
) {
2826 if (ctx
->Const
.GLSLSkipStrictMaxUniformLimitCheck
) {
2827 linker_warning(prog
, "Too many %s shader uniform components, "
2828 "but the driver will try to optimize them out; "
2829 "this is non-portable out-of-spec behavior\n",
2830 _mesa_shader_stage_to_string(i
));
2832 linker_error(prog
, "Too many %s shader uniform components\n",
2833 _mesa_shader_stage_to_string(i
));
2838 unsigned blocks
[MESA_SHADER_STAGES
] = {0};
2839 unsigned total_uniform_blocks
= 0;
2840 unsigned shader_blocks
[MESA_SHADER_STAGES
] = {0};
2841 unsigned total_shader_storage_blocks
= 0;
2843 for (unsigned i
= 0; i
< prog
->NumBufferInterfaceBlocks
; i
++) {
2844 /* Don't check SSBOs for Uniform Block Size */
2845 if (!prog
->BufferInterfaceBlocks
[i
].IsShaderStorage
&&
2846 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
> ctx
->Const
.MaxUniformBlockSize
) {
2847 linker_error(prog
, "Uniform block %s too big (%d/%d)\n",
2848 prog
->BufferInterfaceBlocks
[i
].Name
,
2849 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
,
2850 ctx
->Const
.MaxUniformBlockSize
);
2853 if (prog
->BufferInterfaceBlocks
[i
].IsShaderStorage
&&
2854 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
> ctx
->Const
.MaxShaderStorageBlockSize
) {
2855 linker_error(prog
, "Shader storage block %s too big (%d/%d)\n",
2856 prog
->BufferInterfaceBlocks
[i
].Name
,
2857 prog
->BufferInterfaceBlocks
[i
].UniformBufferSize
,
2858 ctx
->Const
.MaxShaderStorageBlockSize
);
2861 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
2862 if (prog
->InterfaceBlockStageIndex
[j
][i
] != -1) {
2863 struct gl_shader
*sh
= prog
->_LinkedShaders
[j
];
2864 int stage_index
= prog
->InterfaceBlockStageIndex
[j
][i
];
2865 if (sh
&& sh
->BufferInterfaceBlocks
[stage_index
].IsShaderStorage
) {
2867 total_shader_storage_blocks
++;
2870 total_uniform_blocks
++;
2875 if (total_uniform_blocks
> ctx
->Const
.MaxCombinedUniformBlocks
) {
2876 linker_error(prog
, "Too many combined uniform blocks (%d/%d)\n",
2877 total_uniform_blocks
,
2878 ctx
->Const
.MaxCombinedUniformBlocks
);
2880 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2881 const unsigned max_uniform_blocks
=
2882 ctx
->Const
.Program
[i
].MaxUniformBlocks
;
2883 if (blocks
[i
] > max_uniform_blocks
) {
2884 linker_error(prog
, "Too many %s uniform blocks (%d/%d)\n",
2885 _mesa_shader_stage_to_string(i
),
2887 max_uniform_blocks
);
2893 if (total_shader_storage_blocks
> ctx
->Const
.MaxCombinedShaderStorageBlocks
) {
2894 linker_error(prog
, "Too many combined shader storage blocks (%d/%d)\n",
2895 total_shader_storage_blocks
,
2896 ctx
->Const
.MaxCombinedShaderStorageBlocks
);
2898 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2899 const unsigned max_shader_storage_blocks
=
2900 ctx
->Const
.Program
[i
].MaxShaderStorageBlocks
;
2901 if (shader_blocks
[i
] > max_shader_storage_blocks
) {
2902 linker_error(prog
, "Too many %s shader storage blocks (%d/%d)\n",
2903 _mesa_shader_stage_to_string(i
),
2905 max_shader_storage_blocks
);
2914 link_calculate_subroutine_compat(struct gl_shader_program
*prog
)
2916 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2917 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2922 for (unsigned j
= 0; j
< sh
->NumSubroutineUniformRemapTable
; j
++) {
2923 struct gl_uniform_storage
*uni
= sh
->SubroutineUniformRemapTable
[j
];
2929 for (unsigned f
= 0; f
< sh
->NumSubroutineFunctions
; f
++) {
2930 struct gl_subroutine_function
*fn
= &sh
->SubroutineFunctions
[f
];
2931 for (int k
= 0; k
< fn
->num_compat_types
; k
++) {
2932 if (fn
->types
[k
] == uni
->type
) {
2938 uni
->num_compatible_subroutines
= count
;
2944 check_subroutine_resources(struct gl_shader_program
*prog
)
2946 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2947 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2950 if (sh
->NumSubroutineUniformRemapTable
> MAX_SUBROUTINE_UNIFORM_LOCATIONS
)
2951 linker_error(prog
, "Too many %s shader subroutine uniforms\n",
2952 _mesa_shader_stage_to_string(i
));
2957 * Validate shader image resources.
2960 check_image_resources(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
2962 unsigned total_image_units
= 0;
2963 unsigned fragment_outputs
= 0;
2964 unsigned total_shader_storage_blocks
= 0;
2966 if (!ctx
->Extensions
.ARB_shader_image_load_store
)
2969 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
2970 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
2973 if (sh
->NumImages
> ctx
->Const
.Program
[i
].MaxImageUniforms
)
2974 linker_error(prog
, "Too many %s shader image uniforms (%u > %u)\n",
2975 _mesa_shader_stage_to_string(i
), sh
->NumImages
,
2976 ctx
->Const
.Program
[i
].MaxImageUniforms
);
2978 total_image_units
+= sh
->NumImages
;
2980 for (unsigned j
= 0; j
< prog
->NumBufferInterfaceBlocks
; j
++) {
2981 int stage_index
= prog
->InterfaceBlockStageIndex
[i
][j
];
2982 if (stage_index
!= -1 && sh
->BufferInterfaceBlocks
[stage_index
].IsShaderStorage
)
2983 total_shader_storage_blocks
++;
2986 if (i
== MESA_SHADER_FRAGMENT
) {
2987 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
2988 ir_variable
*var
= node
->as_variable();
2989 if (var
&& var
->data
.mode
== ir_var_shader_out
)
2990 /* since there are no double fs outputs - pass false */
2991 fragment_outputs
+= var
->type
->count_attribute_slots(false);
2997 if (total_image_units
> ctx
->Const
.MaxCombinedImageUniforms
)
2998 linker_error(prog
, "Too many combined image uniforms\n");
3000 if (total_image_units
+ fragment_outputs
+ total_shader_storage_blocks
>
3001 ctx
->Const
.MaxCombinedShaderOutputResources
)
3002 linker_error(prog
, "Too many combined image uniforms, shader storage "
3003 " buffers and fragment outputs\n");
3008 * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
3009 * for a variable, checks for overlaps between other uniforms using explicit
3013 reserve_explicit_locations(struct gl_shader_program
*prog
,
3014 string_to_uint_map
*map
, ir_variable
*var
)
3016 unsigned slots
= var
->type
->uniform_locations();
3017 unsigned max_loc
= var
->data
.location
+ slots
- 1;
3019 /* Resize remap table if locations do not fit in the current one. */
3020 if (max_loc
+ 1 > prog
->NumUniformRemapTable
) {
3021 prog
->UniformRemapTable
=
3022 reralloc(prog
, prog
->UniformRemapTable
,
3023 gl_uniform_storage
*,
3026 if (!prog
->UniformRemapTable
) {
3027 linker_error(prog
, "Out of memory during linking.\n");
3031 /* Initialize allocated space. */
3032 for (unsigned i
= prog
->NumUniformRemapTable
; i
< max_loc
+ 1; i
++)
3033 prog
->UniformRemapTable
[i
] = NULL
;
3035 prog
->NumUniformRemapTable
= max_loc
+ 1;
3038 for (unsigned i
= 0; i
< slots
; i
++) {
3039 unsigned loc
= var
->data
.location
+ i
;
3041 /* Check if location is already used. */
3042 if (prog
->UniformRemapTable
[loc
] == INACTIVE_UNIFORM_EXPLICIT_LOCATION
) {
3044 /* Possibly same uniform from a different stage, this is ok. */
3046 if (map
->get(hash_loc
, var
->name
) && hash_loc
== loc
- i
)
3049 /* ARB_explicit_uniform_location specification states:
3051 * "No two default-block uniform variables in the program can have
3052 * the same location, even if they are unused, otherwise a compiler
3053 * or linker error will be generated."
3056 "location qualifier for uniform %s overlaps "
3057 "previously used location\n",
3062 /* Initialize location as inactive before optimization
3063 * rounds and location assignment.
3065 prog
->UniformRemapTable
[loc
] = INACTIVE_UNIFORM_EXPLICIT_LOCATION
;
3068 /* Note, base location used for arrays. */
3069 map
->put(var
->data
.location
, var
->name
);
3075 reserve_subroutine_explicit_locations(struct gl_shader_program
*prog
,
3076 struct gl_shader
*sh
,
3079 unsigned slots
= var
->type
->uniform_locations();
3080 unsigned max_loc
= var
->data
.location
+ slots
- 1;
3082 /* Resize remap table if locations do not fit in the current one. */
3083 if (max_loc
+ 1 > sh
->NumSubroutineUniformRemapTable
) {
3084 sh
->SubroutineUniformRemapTable
=
3085 reralloc(sh
, sh
->SubroutineUniformRemapTable
,
3086 gl_uniform_storage
*,
3089 if (!sh
->SubroutineUniformRemapTable
) {
3090 linker_error(prog
, "Out of memory during linking.\n");
3094 /* Initialize allocated space. */
3095 for (unsigned i
= sh
->NumSubroutineUniformRemapTable
; i
< max_loc
+ 1; i
++)
3096 sh
->SubroutineUniformRemapTable
[i
] = NULL
;
3098 sh
->NumSubroutineUniformRemapTable
= max_loc
+ 1;
3101 for (unsigned i
= 0; i
< slots
; i
++) {
3102 unsigned loc
= var
->data
.location
+ i
;
3104 /* Check if location is already used. */
3105 if (sh
->SubroutineUniformRemapTable
[loc
] == INACTIVE_UNIFORM_EXPLICIT_LOCATION
) {
3107 /* ARB_explicit_uniform_location specification states:
3108 * "No two subroutine uniform variables can have the same location
3109 * in the same shader stage, otherwise a compiler or linker error
3110 * will be generated."
3113 "location qualifier for uniform %s overlaps "
3114 "previously used location\n",
3119 /* Initialize location as inactive before optimization
3120 * rounds and location assignment.
3122 sh
->SubroutineUniformRemapTable
[loc
] = INACTIVE_UNIFORM_EXPLICIT_LOCATION
;
3128 * Check and reserve all explicit uniform locations, called before
3129 * any optimizations happen to handle also inactive uniforms and
3130 * inactive array elements that may get trimmed away.
3133 check_explicit_uniform_locations(struct gl_context
*ctx
,
3134 struct gl_shader_program
*prog
)
3136 if (!ctx
->Extensions
.ARB_explicit_uniform_location
)
3139 /* This map is used to detect if overlapping explicit locations
3140 * occur with the same uniform (from different stage) or a different one.
3142 string_to_uint_map
*uniform_map
= new string_to_uint_map
;
3145 linker_error(prog
, "Out of memory during linking.\n");
3149 unsigned entries_total
= 0;
3150 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3151 struct gl_shader
*sh
= prog
->_LinkedShaders
[i
];
3156 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3157 ir_variable
*var
= node
->as_variable();
3158 if (!var
|| var
->data
.mode
!= ir_var_uniform
)
3161 entries_total
+= var
->type
->uniform_locations();
3163 if (var
->data
.explicit_location
) {
3165 if (var
->type
->without_array()->is_subroutine())
3166 ret
= reserve_subroutine_explicit_locations(prog
, sh
, var
);
3168 ret
= reserve_explicit_locations(prog
, uniform_map
, var
);
3177 /* Verify that total amount of entries for explicit and implicit locations
3178 * is less than MAX_UNIFORM_LOCATIONS.
3180 if (entries_total
>= ctx
->Const
.MaxUserAssignableUniformLocations
) {
3181 linker_error(prog
, "count of uniform locations >= MAX_UNIFORM_LOCATIONS"
3182 "(%u >= %u)", entries_total
,
3183 ctx
->Const
.MaxUserAssignableUniformLocations
);
3189 should_add_buffer_variable(struct gl_shader_program
*shProg
,
3190 GLenum type
, const char *name
)
3192 bool found_interface
= false;
3193 unsigned block_name_len
= 0;
3194 const char *block_name_dot
= strchr(name
, '.');
3196 /* These rules only apply to buffer variables. So we return
3197 * true for the rest of types.
3199 if (type
!= GL_BUFFER_VARIABLE
)
3202 for (unsigned i
= 0; i
< shProg
->NumBufferInterfaceBlocks
; i
++) {
3203 const char *block_name
= shProg
->BufferInterfaceBlocks
[i
].Name
;
3204 block_name_len
= strlen(block_name
);
3206 const char *block_square_bracket
= strchr(block_name
, '[');
3207 if (block_square_bracket
) {
3208 /* The block is part of an array of named interfaces,
3209 * for the name comparison we ignore the "[x]" part.
3211 block_name_len
-= strlen(block_square_bracket
);
3214 if (block_name_dot
) {
3215 /* Check if the variable name starts with the interface
3216 * name. The interface name (if present) should have the
3217 * length than the interface block name we are comparing to.
3219 unsigned len
= strlen(name
) - strlen(block_name_dot
);
3220 if (len
!= block_name_len
)
3224 if (strncmp(block_name
, name
, block_name_len
) == 0) {
3225 found_interface
= true;
3230 /* We remove the interface name from the buffer variable name,
3231 * including the dot that follows it.
3233 if (found_interface
)
3234 name
= name
+ block_name_len
+ 1;
3236 /* From: ARB_program_interface_query extension:
3238 * "For an active shader storage block member declared as an array, an
3239 * entry will be generated only for the first array element, regardless
3240 * of its type. For arrays of aggregate types, the enumeration rules are
3241 * applied recursively for the single enumerated array element.
3243 const char *struct_first_dot
= strchr(name
, '.');
3244 const char *first_square_bracket
= strchr(name
, '[');
3246 /* The buffer variable is on top level and it is not an array */
3247 if (!first_square_bracket
) {
3249 /* The shader storage block member is a struct, then generate the entry */
3250 } else if (struct_first_dot
&& struct_first_dot
< first_square_bracket
) {
3253 /* Shader storage block member is an array, only generate an entry for the
3254 * first array element.
3256 if (strncmp(first_square_bracket
, "[0]", 3) == 0)
3264 add_program_resource(struct gl_shader_program
*prog
, GLenum type
,
3265 const void *data
, uint8_t stages
)
3269 /* If resource already exists, do not add it again. */
3270 for (unsigned i
= 0; i
< prog
->NumProgramResourceList
; i
++)
3271 if (prog
->ProgramResourceList
[i
].Data
== data
)
3274 prog
->ProgramResourceList
=
3276 prog
->ProgramResourceList
,
3277 gl_program_resource
,
3278 prog
->NumProgramResourceList
+ 1);
3280 if (!prog
->ProgramResourceList
) {
3281 linker_error(prog
, "Out of memory during linking.\n");
3285 struct gl_program_resource
*res
=
3286 &prog
->ProgramResourceList
[prog
->NumProgramResourceList
];
3290 res
->StageReferences
= stages
;
3292 prog
->NumProgramResourceList
++;
3297 /* Function checks if a variable var is a packed varying and
3298 * if given name is part of packed varying's list.
3300 * If a variable is a packed varying, it has a name like
3301 * 'packed:a,b,c' where a, b and c are separate variables.
3304 included_in_packed_varying(ir_variable
*var
, const char *name
)
3306 if (strncmp(var
->name
, "packed:", 7) != 0)
3309 char *list
= strdup(var
->name
+ 7);
3314 char *token
= strtok_r(list
, ",", &saveptr
);
3316 if (strcmp(token
, name
) == 0) {
3320 token
= strtok_r(NULL
, ",", &saveptr
);
3327 * Function builds a stage reference bitmask from variable name.
3330 build_stageref(struct gl_shader_program
*shProg
, const char *name
,
3335 /* Note, that we assume MAX 8 stages, if there will be more stages, type
3336 * used for reference mask in gl_program_resource will need to be changed.
3338 assert(MESA_SHADER_STAGES
< 8);
3340 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3341 struct gl_shader
*sh
= shProg
->_LinkedShaders
[i
];
3345 /* Shader symbol table may contain variables that have
3346 * been optimized away. Search IR for the variable instead.
3348 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3349 ir_variable
*var
= node
->as_variable();
3351 unsigned baselen
= strlen(var
->name
);
3353 if (included_in_packed_varying(var
, name
)) {
3358 /* Type needs to match if specified, otherwise we might
3359 * pick a variable with same name but different interface.
3361 if (var
->data
.mode
!= mode
)
3364 if (strncmp(var
->name
, name
, baselen
) == 0) {
3365 /* Check for exact name matches but also check for arrays and
3368 if (name
[baselen
] == '\0' ||
3369 name
[baselen
] == '[' ||
3370 name
[baselen
] == '.') {
3382 * Create gl_shader_variable from ir_variable class.
3384 static gl_shader_variable
*
3385 create_shader_variable(struct gl_shader_program
*shProg
, const ir_variable
*in
)
3387 gl_shader_variable
*out
= ralloc(shProg
, struct gl_shader_variable
);
3391 out
->type
= in
->type
;
3392 out
->name
= ralloc_strdup(shProg
, in
->name
);
3397 out
->location
= in
->data
.location
;
3398 out
->index
= in
->data
.index
;
3399 out
->patch
= in
->data
.patch
;
3400 out
->mode
= in
->data
.mode
;
3406 add_interface_variables(struct gl_shader_program
*shProg
,
3407 exec_list
*ir
, GLenum programInterface
)
3409 foreach_in_list(ir_instruction
, node
, ir
) {
3410 ir_variable
*var
= node
->as_variable();
3416 switch (var
->data
.mode
) {
3417 /* From GL 4.3 core spec, section 11.1.1 (Vertex Attributes):
3418 * "For GetActiveAttrib, all active vertex shader input variables
3419 * are enumerated, including the special built-in inputs gl_VertexID
3420 * and gl_InstanceID."
3422 case ir_var_system_value
:
3423 if (var
->data
.location
!= SYSTEM_VALUE_VERTEX_ID
&&
3424 var
->data
.location
!= SYSTEM_VALUE_VERTEX_ID_ZERO_BASE
&&
3425 var
->data
.location
!= SYSTEM_VALUE_INSTANCE_ID
)
3427 /* Mark special built-in inputs referenced by the vertex stage so
3428 * that they are considered active by the shader queries.
3430 mask
= (1 << (MESA_SHADER_VERTEX
));
3432 case ir_var_shader_in
:
3433 if (programInterface
!= GL_PROGRAM_INPUT
)
3436 case ir_var_shader_out
:
3437 if (programInterface
!= GL_PROGRAM_OUTPUT
)
3444 /* Skip packed varyings, packed varyings are handled separately
3445 * by add_packed_varyings.
3447 if (strncmp(var
->name
, "packed:", 7) == 0)
3450 /* Skip fragdata arrays, these are handled separately
3451 * by add_fragdata_arrays.
3453 if (strncmp(var
->name
, "gl_out_FragData", 15) == 0)
3456 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3460 if (!add_program_resource(shProg
, programInterface
, sha_v
,
3461 build_stageref(shProg
, sha_v
->name
,
3462 sha_v
->mode
) | mask
))
3469 add_packed_varyings(struct gl_shader_program
*shProg
, int stage
, GLenum type
)
3471 struct gl_shader
*sh
= shProg
->_LinkedShaders
[stage
];
3474 if (!sh
|| !sh
->packed_varyings
)
3477 foreach_in_list(ir_instruction
, node
, sh
->packed_varyings
) {
3478 ir_variable
*var
= node
->as_variable();
3480 switch (var
->data
.mode
) {
3481 case ir_var_shader_in
:
3482 iface
= GL_PROGRAM_INPUT
;
3484 case ir_var_shader_out
:
3485 iface
= GL_PROGRAM_OUTPUT
;
3488 unreachable("unexpected type");
3491 if (type
== iface
) {
3492 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3495 if (!add_program_resource(shProg
, iface
, sha_v
,
3496 build_stageref(shProg
, sha_v
->name
,
3506 add_fragdata_arrays(struct gl_shader_program
*shProg
)
3508 struct gl_shader
*sh
= shProg
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
3510 if (!sh
|| !sh
->fragdata_arrays
)
3513 foreach_in_list(ir_instruction
, node
, sh
->fragdata_arrays
) {
3514 ir_variable
*var
= node
->as_variable();
3516 assert(var
->data
.mode
== ir_var_shader_out
);
3517 gl_shader_variable
*sha_v
= create_shader_variable(shProg
, var
);
3520 if (!add_program_resource(shProg
, GL_PROGRAM_OUTPUT
, sha_v
,
3521 1 << MESA_SHADER_FRAGMENT
))
/**
 * Return a newly malloc'd copy of the top-level prefix of \p name — the
 * part before the first '.' or '[', whichever starts the member/array
 * suffix.  Caller owns (frees) the result.
 */
static char *
get_top_level_name(const char *name)
{
   const char *first_dot = strchr(name, '.');
   const char *first_square_bracket = strchr(name, '[');
   size_t name_size;

   /* From ARB_program_interface_query spec:
    *
    * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying the
    *  number of active array elements of the top-level shader storage block
    *  member containing to the active variable is written to <params>.  If the
    *  top-level block member is not declared as an array, the value one is
    *  written to <params>.  If the top-level block member is an array with no
    *  declared size, the value zero is written to <params>."
    */
   if (!first_square_bracket && !first_dot) {
      /* The buffer variable is on top level. */
      name_size = strlen(name);
   } else if (!first_square_bracket ||
              (first_dot && first_dot < first_square_bracket)) {
      name_size = (size_t)(first_dot - name);
   } else {
      name_size = (size_t)(first_square_bracket - name);
   }

   /* Equivalent of strndup(name, name_size), avoiding the POSIX extension. */
   char *prefix = (char *) malloc(name_size + 1);
   if (prefix) {
      memcpy(prefix, name, name_size);
      prefix[name_size] = '\0';
   }
   return prefix;
}
/**
 * Return a newly malloc'd copy of everything after the first '.' in
 * \p name, or a copy of the whole string when it contains no dot.
 * Caller owns (frees) the result.
 */
static char *
get_var_name(const char *name)
{
   const char *first_dot = strchr(name, '.');
   const char *tail = first_dot ? first_dot + 1 : name;

   /* Equivalent of strdup/strndup on the tail, without POSIX extensions. */
   size_t len = strlen(tail);
   char *copy = (char *) malloc(len + 1);
   if (copy) {
      memcpy(copy, tail, len);
      copy[len] = '\0';
   }
   return copy;
}
/**
 * Check whether \p name already refers to a top-level shader storage block
 * member.
 *
 * Two possibilities are covered: an instanced shader storage block (name
 * equals "interface_name.field_name") and a non-instanced/unnamed block
 * (name equals "field_name" alone).
 */
static bool
is_top_level_shader_storage_block_member(const char* name,
                                         const char* interface_name,
                                         const char* field_name)
{
   /* Full instanced name is: interface name + '.' + var name + terminator. */
   int name_length = strlen(interface_name) + 1 + strlen(field_name) + 1;
   char *full_instanced_name = (char *) calloc(name_length, sizeof(char));
   if (!full_instanced_name) {
      fprintf(stderr, "%s: Cannot allocate space for name\n", __func__);
      return false;
   }

   snprintf(full_instanced_name, name_length, "%s.%s",
            interface_name, field_name);

   /* Check if its top-level shader storage block member of an
    * instanced interface block, or of a unnamed interface block.
    */
   bool result = strcmp(name, full_instanced_name) == 0 ||
                 strcmp(name, field_name) == 0;

   free(full_instanced_name);
   return result;
}
3608 get_array_size(struct gl_uniform_storage
*uni
, const glsl_struct_field
*field
,
3609 char *interface_name
, char *var_name
)
3611 /* From GL_ARB_program_interface_query spec:
3613 * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer
3614 * identifying the number of active array elements of the top-level
3615 * shader storage block member containing to the active variable is
3616 * written to <params>. If the top-level block member is not
3617 * declared as an array, the value one is written to <params>. If
3618 * the top-level block member is an array with no declared size,
3619 * the value zero is written to <params>.
3621 if (is_top_level_shader_storage_block_member(uni
->name
,
3625 else if (field
->type
->is_unsized_array())
3627 else if (field
->type
->is_array())
3628 return field
->type
->length
;
3634 get_array_stride(struct gl_uniform_storage
*uni
, const glsl_type
*interface
,
3635 const glsl_struct_field
*field
, char *interface_name
,
3638 /* From GL_ARB_program_interface_query:
3640 * "For the property TOP_LEVEL_ARRAY_STRIDE, a single integer
3641 * identifying the stride between array elements of the top-level
3642 * shader storage block member containing the active variable is
3643 * written to <params>. For top-level block members declared as
3644 * arrays, the value written is the difference, in basic machine
3645 * units, between the offsets of the active variable for
3646 * consecutive elements in the top-level array. For top-level
3647 * block members not declared as an array, zero is written to
3650 if (field
->type
->is_array()) {
3651 const enum glsl_matrix_layout matrix_layout
=
3652 glsl_matrix_layout(field
->matrix_layout
);
3653 bool row_major
= matrix_layout
== GLSL_MATRIX_LAYOUT_ROW_MAJOR
;
3654 const glsl_type
*array_type
= field
->type
->fields
.array
;
3656 if (is_top_level_shader_storage_block_member(uni
->name
,
3661 if (interface
->interface_packing
!= GLSL_INTERFACE_PACKING_STD430
) {
3662 if (array_type
->is_record() || array_type
->is_array())
3663 return glsl_align(array_type
->std140_size(row_major
), 16);
3665 return MAX2(array_type
->std140_base_alignment(row_major
), 16);
3667 return array_type
->std430_array_stride(row_major
);
3674 calculate_array_size_and_stride(struct gl_shader_program
*shProg
,
3675 struct gl_uniform_storage
*uni
)
3677 int block_index
= uni
->block_index
;
3678 int array_size
= -1;
3679 int array_stride
= -1;
3680 char *var_name
= get_top_level_name(uni
->name
);
3681 char *interface_name
=
3682 get_top_level_name(shProg
->BufferInterfaceBlocks
[block_index
].Name
);
3684 if (strcmp(var_name
, interface_name
) == 0) {
3685 /* Deal with instanced array of SSBOs */
3686 char *temp_name
= get_var_name(uni
->name
);
3688 linker_error(shProg
, "Out of memory during linking.\n");
3689 goto write_top_level_array_size_and_stride
;
3692 var_name
= get_top_level_name(temp_name
);
3695 linker_error(shProg
, "Out of memory during linking.\n");
3696 goto write_top_level_array_size_and_stride
;
3700 for (unsigned i
= 0; i
< shProg
->NumShaders
; i
++) {
3701 if (shProg
->Shaders
[i
] == NULL
)
3704 const gl_shader
*stage
= shProg
->Shaders
[i
];
3705 foreach_in_list(ir_instruction
, node
, stage
->ir
) {
3706 ir_variable
*var
= node
->as_variable();
3707 if (!var
|| !var
->get_interface_type() ||
3708 var
->data
.mode
!= ir_var_shader_storage
)
3711 const glsl_type
*interface
= var
->get_interface_type();
3713 if (strcmp(interface_name
, interface
->name
) != 0)
3716 for (unsigned i
= 0; i
< interface
->length
; i
++) {
3717 const glsl_struct_field
*field
= &interface
->fields
.structure
[i
];
3718 if (strcmp(field
->name
, var_name
) != 0)
3721 array_stride
= get_array_stride(uni
, interface
, field
,
3722 interface_name
, var_name
);
3723 array_size
= get_array_size(uni
, field
, interface_name
, var_name
);
3724 goto write_top_level_array_size_and_stride
;
3728 write_top_level_array_size_and_stride
:
3729 free(interface_name
);
3731 uni
->top_level_array_stride
= array_stride
;
3732 uni
->top_level_array_size
= array_size
;
3736 * Builds up a list of program resources that point to existing
3740 build_program_resource_list(struct gl_shader_program
*shProg
)
3742 /* Rebuild resource list. */
3743 if (shProg
->ProgramResourceList
) {
3744 ralloc_free(shProg
->ProgramResourceList
);
3745 shProg
->ProgramResourceList
= NULL
;
3746 shProg
->NumProgramResourceList
= 0;
3749 int input_stage
= MESA_SHADER_STAGES
, output_stage
= 0;
3751 /* Determine first input and final output stage. These are used to
3752 * detect which variables should be enumerated in the resource list
3753 * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
3755 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3756 if (!shProg
->_LinkedShaders
[i
])
3758 if (input_stage
== MESA_SHADER_STAGES
)
3763 /* Empty shader, no resources. */
3764 if (input_stage
== MESA_SHADER_STAGES
&& output_stage
== 0)
3767 /* Program interface needs to expose varyings in case of SSO. */
3768 if (shProg
->SeparateShader
) {
3769 if (!add_packed_varyings(shProg
, input_stage
, GL_PROGRAM_INPUT
))
3772 if (!add_packed_varyings(shProg
, output_stage
, GL_PROGRAM_OUTPUT
))
3776 if (!add_fragdata_arrays(shProg
))
3779 /* Add inputs and outputs to the resource list. */
3780 if (!add_interface_variables(shProg
, shProg
->_LinkedShaders
[input_stage
]->ir
,
3784 if (!add_interface_variables(shProg
, shProg
->_LinkedShaders
[output_stage
]->ir
,
3788 /* Add transform feedback varyings. */
3789 if (shProg
->LinkedTransformFeedback
.NumVarying
> 0) {
3790 for (int i
= 0; i
< shProg
->LinkedTransformFeedback
.NumVarying
; i
++) {
3791 if (!add_program_resource(shProg
, GL_TRANSFORM_FEEDBACK_VARYING
,
3792 &shProg
->LinkedTransformFeedback
.Varyings
[i
],
3798 /* Add uniforms from uniform storage. */
3799 for (unsigned i
= 0; i
< shProg
->NumUniformStorage
; i
++) {
3800 /* Do not add uniforms internally used by Mesa. */
3801 if (shProg
->UniformStorage
[i
].hidden
)
3805 build_stageref(shProg
, shProg
->UniformStorage
[i
].name
,
3808 /* Add stagereferences for uniforms in a uniform block. */
3809 int block_index
= shProg
->UniformStorage
[i
].block_index
;
3810 if (block_index
!= -1) {
3811 for (unsigned j
= 0; j
< MESA_SHADER_STAGES
; j
++) {
3812 if (shProg
->InterfaceBlockStageIndex
[j
][block_index
] != -1)
3813 stageref
|= (1 << j
);
3817 bool is_shader_storage
= shProg
->UniformStorage
[i
].is_shader_storage
;
3818 GLenum type
= is_shader_storage
? GL_BUFFER_VARIABLE
: GL_UNIFORM
;
3819 if (!should_add_buffer_variable(shProg
, type
,
3820 shProg
->UniformStorage
[i
].name
))
3823 if (is_shader_storage
) {
3824 calculate_array_size_and_stride(shProg
, &shProg
->UniformStorage
[i
]);
3827 if (!add_program_resource(shProg
, type
,
3828 &shProg
->UniformStorage
[i
], stageref
))
3832 /* Add program uniform blocks and shader storage blocks. */
3833 for (unsigned i
= 0; i
< shProg
->NumBufferInterfaceBlocks
; i
++) {
3834 bool is_shader_storage
= shProg
->BufferInterfaceBlocks
[i
].IsShaderStorage
;
3835 GLenum type
= is_shader_storage
? GL_SHADER_STORAGE_BLOCK
: GL_UNIFORM_BLOCK
;
3836 if (!add_program_resource(shProg
, type
,
3837 &shProg
->BufferInterfaceBlocks
[i
], 0))
3841 /* Add atomic counter buffers. */
3842 for (unsigned i
= 0; i
< shProg
->NumAtomicBuffers
; i
++) {
3843 if (!add_program_resource(shProg
, GL_ATOMIC_COUNTER_BUFFER
,
3844 &shProg
->AtomicBuffers
[i
], 0))
3848 for (unsigned i
= 0; i
< shProg
->NumUniformStorage
; i
++) {
3850 if (!shProg
->UniformStorage
[i
].hidden
)
3853 for (int j
= MESA_SHADER_VERTEX
; j
< MESA_SHADER_STAGES
; j
++) {
3854 if (!shProg
->UniformStorage
[i
].opaque
[j
].active
||
3855 !shProg
->UniformStorage
[i
].type
->is_subroutine())
3858 type
= _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage
)j
);
3859 /* add shader subroutines */
3860 if (!add_program_resource(shProg
, type
, &shProg
->UniformStorage
[i
], 0))
3865 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3866 struct gl_shader
*sh
= shProg
->_LinkedShaders
[i
];
3872 type
= _mesa_shader_stage_to_subroutine((gl_shader_stage
)i
);
3873 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3874 if (!add_program_resource(shProg
, type
, &sh
->SubroutineFunctions
[j
], 0))
3881 * This check is done to make sure we allow only constant expression
3882 * indexing and "constant-index-expression" (indexing with an expression
3883 * that includes loop induction variable).
3886 validate_sampler_array_indexing(struct gl_context
*ctx
,
3887 struct gl_shader_program
*prog
)
3889 dynamic_sampler_array_indexing_visitor v
;
3890 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3891 if (prog
->_LinkedShaders
[i
] == NULL
)
3894 bool no_dynamic_indexing
=
3895 ctx
->Const
.ShaderCompilerOptions
[i
].EmitNoIndirectSampler
;
3897 /* Search for array derefs in shader. */
3898 v
.run(prog
->_LinkedShaders
[i
]->ir
);
3899 if (v
.uses_dynamic_sampler_array_indexing()) {
3900 const char *msg
= "sampler arrays indexed with non-constant "
3901 "expressions is forbidden in GLSL %s %u";
3902 /* Backend has indicated that it has no dynamic indexing support. */
3903 if (no_dynamic_indexing
) {
3904 linker_error(prog
, msg
, prog
->IsES
? "ES" : "", prog
->Version
);
3907 linker_warning(prog
, msg
, prog
->IsES
? "ES" : "", prog
->Version
);
3915 link_assign_subroutine_types(struct gl_shader_program
*prog
)
3917 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
3918 gl_shader
*sh
= prog
->_LinkedShaders
[i
];
3923 foreach_in_list(ir_instruction
, node
, sh
->ir
) {
3924 ir_function
*fn
= node
->as_function();
3928 if (fn
->is_subroutine
)
3929 sh
->NumSubroutineUniformTypes
++;
3931 if (!fn
->num_subroutine_types
)
3934 sh
->SubroutineFunctions
= reralloc(sh
, sh
->SubroutineFunctions
,
3935 struct gl_subroutine_function
,
3936 sh
->NumSubroutineFunctions
+ 1);
3937 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].name
= ralloc_strdup(sh
, fn
->name
);
3938 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].num_compat_types
= fn
->num_subroutine_types
;
3939 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].types
=
3940 ralloc_array(sh
, const struct glsl_type
*,
3941 fn
->num_subroutine_types
);
3943 /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
3946 * "Each subroutine with an index qualifier in the shader must be
3947 * given a unique index, otherwise a compile or link error will be
3950 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3951 if (sh
->SubroutineFunctions
[j
].index
!= -1 &&
3952 sh
->SubroutineFunctions
[j
].index
== fn
->subroutine_index
) {
3953 linker_error(prog
, "each subroutine index qualifier in the "
3954 "shader must be unique\n");
3958 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].index
=
3959 fn
->subroutine_index
;
3961 for (int j
= 0; j
< fn
->num_subroutine_types
; j
++)
3962 sh
->SubroutineFunctions
[sh
->NumSubroutineFunctions
].types
[j
] = fn
->subroutine_types
[j
];
3963 sh
->NumSubroutineFunctions
++;
3966 /* Assign index for subroutines without an explicit index*/
3968 for (unsigned j
= 0; j
< sh
->NumSubroutineFunctions
; j
++) {
3969 while (sh
->SubroutineFunctions
[j
].index
== -1) {
3970 for (unsigned k
= 0; k
< sh
->NumSubroutineFunctions
; k
++) {
3971 if (sh
->SubroutineFunctions
[k
].index
== index
)
3973 else if (k
== sh
->NumSubroutineFunctions
- 1)
3974 sh
->SubroutineFunctions
[j
].index
= index
;
3983 split_ubos_and_ssbos(void *mem_ctx
,
3984 struct gl_uniform_block
*blocks
,
3985 unsigned num_blocks
,
3986 struct gl_uniform_block
***ubos
,
3988 unsigned **ubo_interface_block_indices
,
3989 struct gl_uniform_block
***ssbos
,
3990 unsigned *num_ssbos
,
3991 unsigned **ssbo_interface_block_indices
)
3993 unsigned num_ubo_blocks
= 0;
3994 unsigned num_ssbo_blocks
= 0;
3996 for (unsigned i
= 0; i
< num_blocks
; i
++) {
3997 if (blocks
[i
].IsShaderStorage
)
4003 *ubos
= ralloc_array(mem_ctx
, gl_uniform_block
*, num_ubo_blocks
);
4006 *ssbos
= ralloc_array(mem_ctx
, gl_uniform_block
*, num_ssbo_blocks
);
4009 if (ubo_interface_block_indices
)
4010 *ubo_interface_block_indices
=
4011 ralloc_array(mem_ctx
, unsigned, num_ubo_blocks
);
4013 if (ssbo_interface_block_indices
)
4014 *ssbo_interface_block_indices
=
4015 ralloc_array(mem_ctx
, unsigned, num_ssbo_blocks
);
4017 for (unsigned i
= 0; i
< num_blocks
; i
++) {
4018 if (blocks
[i
].IsShaderStorage
) {
4019 (*ssbos
)[*num_ssbos
] = &blocks
[i
];
4020 if (ssbo_interface_block_indices
)
4021 (*ssbo_interface_block_indices
)[*num_ssbos
] = i
;
4024 (*ubos
)[*num_ubos
] = &blocks
[i
];
4025 if (ubo_interface_block_indices
)
4026 (*ubo_interface_block_indices
)[*num_ubos
] = i
;
4031 assert(*num_ubos
+ *num_ssbos
== num_blocks
);
4035 set_always_active_io(exec_list
*ir
, ir_variable_mode io_mode
)
4037 assert(io_mode
== ir_var_shader_in
|| io_mode
== ir_var_shader_out
);
4039 foreach_in_list(ir_instruction
, node
, ir
) {
4040 ir_variable
*const var
= node
->as_variable();
4042 if (var
== NULL
|| var
->data
.mode
!= io_mode
)
4045 /* Don't set always active on builtins that haven't been redeclared */
4046 if (var
->data
.how_declared
== ir_var_declared_implicitly
)
4049 var
->data
.always_active_io
= true;
4054 * When separate shader programs are enabled, only input/outputs between
4055 * the stages of a multi-stage separate program can be safely removed
4056 * from the shader interface. Other inputs/outputs must remain active.
4059 disable_varying_optimizations_for_sso(struct gl_shader_program
*prog
)
4061 unsigned first
, last
;
4062 assert(prog
->SeparateShader
);
4064 first
= MESA_SHADER_STAGES
;
4067 /* Determine first and last stage. Excluding the compute stage */
4068 for (unsigned i
= 0; i
< MESA_SHADER_COMPUTE
; i
++) {
4069 if (!prog
->_LinkedShaders
[i
])
4071 if (first
== MESA_SHADER_STAGES
)
4076 if (first
== MESA_SHADER_STAGES
)
4079 for (unsigned stage
= 0; stage
< MESA_SHADER_STAGES
; stage
++) {
4080 gl_shader
*sh
= prog
->_LinkedShaders
[stage
];
4084 if (first
== last
) {
4085 /* For a single shader program only allow inputs to the vertex shader
4086 * and outputs from the fragment shader to be removed.
4088 if (stage
!= MESA_SHADER_VERTEX
)
4089 set_always_active_io(sh
->ir
, ir_var_shader_in
);
4090 if (stage
!= MESA_SHADER_FRAGMENT
)
4091 set_always_active_io(sh
->ir
, ir_var_shader_out
);
4093 /* For multi-stage separate shader programs only allow inputs and
4094 * outputs between the shader stages to be removed as well as inputs
4095 * to the vertex shader and outputs from the fragment shader.
4097 if (stage
== first
&& stage
!= MESA_SHADER_VERTEX
)
4098 set_always_active_io(sh
->ir
, ir_var_shader_in
);
4099 else if (stage
== last
&& stage
!= MESA_SHADER_FRAGMENT
)
4100 set_always_active_io(sh
->ir
, ir_var_shader_out
);
4106 link_shaders(struct gl_context
*ctx
, struct gl_shader_program
*prog
)
4108 tfeedback_decl
*tfeedback_decls
= NULL
;
4109 unsigned num_tfeedback_decls
= prog
->TransformFeedback
.NumVarying
;
4111 void *mem_ctx
= ralloc_context(NULL
); // temporary linker context
4113 prog
->LinkStatus
= true; /* All error paths will set this to false */
4114 prog
->Validated
= false;
4115 prog
->_Used
= false;
4117 prog
->ARB_fragment_coord_conventions_enable
= false;
4119 /* Separate the shaders into groups based on their type.
4121 struct gl_shader
**shader_list
[MESA_SHADER_STAGES
];
4122 unsigned num_shaders
[MESA_SHADER_STAGES
];
4124 for (int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4125 shader_list
[i
] = (struct gl_shader
**)
4126 calloc(prog
->NumShaders
, sizeof(struct gl_shader
*));
4130 unsigned min_version
= UINT_MAX
;
4131 unsigned max_version
= 0;
4132 const bool is_es_prog
=
4133 (prog
->NumShaders
> 0 && prog
->Shaders
[0]->IsES
) ? true : false;
4134 for (unsigned i
= 0; i
< prog
->NumShaders
; i
++) {
4135 min_version
= MIN2(min_version
, prog
->Shaders
[i
]->Version
);
4136 max_version
= MAX2(max_version
, prog
->Shaders
[i
]->Version
);
4138 if (prog
->Shaders
[i
]->IsES
!= is_es_prog
) {
4139 linker_error(prog
, "all shaders must use same shading "
4140 "language version\n");
4144 if (prog
->Shaders
[i
]->ARB_fragment_coord_conventions_enable
) {
4145 prog
->ARB_fragment_coord_conventions_enable
= true;
4148 gl_shader_stage shader_type
= prog
->Shaders
[i
]->Stage
;
4149 shader_list
[shader_type
][num_shaders
[shader_type
]] = prog
->Shaders
[i
];
4150 num_shaders
[shader_type
]++;
4153 /* In desktop GLSL, different shader versions may be linked together. In
4154 * GLSL ES, all shader versions must be the same.
4156 if (is_es_prog
&& min_version
!= max_version
) {
4157 linker_error(prog
, "all shaders must use same shading "
4158 "language version\n");
4162 prog
->Version
= max_version
;
4163 prog
->IsES
= is_es_prog
;
4165 /* From OpenGL 4.5 Core specification (7.3 Program Objects):
4166 * "Linking can fail for a variety of reasons as specified in the OpenGL
4167 * Shading Language Specification, as well as any of the following
4170 * * No shader objects are attached to program.
4174 * Same rule applies for OpenGL ES >= 3.1.
4177 if (prog
->NumShaders
== 0 &&
4178 ((ctx
->API
== API_OPENGL_CORE
&& ctx
->Version
>= 45) ||
4179 (ctx
->API
== API_OPENGLES2
&& ctx
->Version
>= 31))) {
4180 linker_error(prog
, "No shader objects are attached to program.\n");
4184 /* Some shaders have to be linked with some other shaders present.
4186 if (num_shaders
[MESA_SHADER_GEOMETRY
] > 0 &&
4187 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4188 !prog
->SeparateShader
) {
4189 linker_error(prog
, "Geometry shader must be linked with "
4193 if (num_shaders
[MESA_SHADER_TESS_EVAL
] > 0 &&
4194 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4195 !prog
->SeparateShader
) {
4196 linker_error(prog
, "Tessellation evaluation shader must be linked with "
4200 if (num_shaders
[MESA_SHADER_TESS_CTRL
] > 0 &&
4201 num_shaders
[MESA_SHADER_VERTEX
] == 0 &&
4202 !prog
->SeparateShader
) {
4203 linker_error(prog
, "Tessellation control shader must be linked with "
4208 /* The spec is self-contradictory here. It allows linking without a tess
4209 * eval shader, but that can only be used with transform feedback and
4210 * rasterization disabled. However, transform feedback isn't allowed
4211 * with GL_PATCHES, so it can't be used.
4213 * More investigation showed that the idea of transform feedback after
4214 * a tess control shader was dropped, because some hw vendors couldn't
4215 * support tessellation without a tess eval shader, but the linker section
4216 * wasn't updated to reflect that.
4218 * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
4221 * Do what's reasonable and always require a tess eval shader if a tess
4222 * control shader is present.
4224 if (num_shaders
[MESA_SHADER_TESS_CTRL
] > 0 &&
4225 num_shaders
[MESA_SHADER_TESS_EVAL
] == 0 &&
4226 !prog
->SeparateShader
) {
4227 linker_error(prog
, "Tessellation control shader must be linked with "
4228 "tessellation evaluation shader\n");
4232 /* Compute shaders have additional restrictions. */
4233 if (num_shaders
[MESA_SHADER_COMPUTE
] > 0 &&
4234 num_shaders
[MESA_SHADER_COMPUTE
] != prog
->NumShaders
) {
4235 linker_error(prog
, "Compute shaders may not be linked with any other "
4236 "type of shader\n");
4239 for (unsigned int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4240 if (prog
->_LinkedShaders
[i
] != NULL
)
4241 _mesa_delete_shader(ctx
, prog
->_LinkedShaders
[i
]);
4243 prog
->_LinkedShaders
[i
] = NULL
;
4246 /* Link all shaders for a particular stage and validate the result.
4248 for (int stage
= 0; stage
< MESA_SHADER_STAGES
; stage
++) {
4249 if (num_shaders
[stage
] > 0) {
4250 gl_shader
*const sh
=
4251 link_intrastage_shaders(mem_ctx
, ctx
, prog
, shader_list
[stage
],
4252 num_shaders
[stage
]);
4254 if (!prog
->LinkStatus
) {
4256 _mesa_delete_shader(ctx
, sh
);
4261 case MESA_SHADER_VERTEX
:
4262 validate_vertex_shader_executable(prog
, sh
);
4264 case MESA_SHADER_TESS_CTRL
:
4265 /* nothing to be done */
4267 case MESA_SHADER_TESS_EVAL
:
4268 validate_tess_eval_shader_executable(prog
, sh
);
4270 case MESA_SHADER_GEOMETRY
:
4271 validate_geometry_shader_executable(prog
, sh
);
4273 case MESA_SHADER_FRAGMENT
:
4274 validate_fragment_shader_executable(prog
, sh
);
4277 if (!prog
->LinkStatus
) {
4279 _mesa_delete_shader(ctx
, sh
);
4283 _mesa_reference_shader(ctx
, &prog
->_LinkedShaders
[stage
], sh
);
4287 if (num_shaders
[MESA_SHADER_GEOMETRY
] > 0)
4288 prog
->LastClipDistanceArraySize
= prog
->Geom
.ClipDistanceArraySize
;
4289 else if (num_shaders
[MESA_SHADER_TESS_EVAL
] > 0)
4290 prog
->LastClipDistanceArraySize
= prog
->TessEval
.ClipDistanceArraySize
;
4291 else if (num_shaders
[MESA_SHADER_VERTEX
] > 0)
4292 prog
->LastClipDistanceArraySize
= prog
->Vert
.ClipDistanceArraySize
;
4294 prog
->LastClipDistanceArraySize
= 0; /* Not used */
4296 /* Here begins the inter-stage linking phase. Some initial validation is
4297 * performed, then locations are assigned for uniforms, attributes, and
4300 cross_validate_uniforms(prog
);
4301 if (!prog
->LinkStatus
)
4304 unsigned first
, last
, prev
;
4306 first
= MESA_SHADER_STAGES
;
4309 /* Determine first and last stage. */
4310 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4311 if (!prog
->_LinkedShaders
[i
])
4313 if (first
== MESA_SHADER_STAGES
)
4318 check_explicit_uniform_locations(ctx
, prog
);
4319 link_assign_subroutine_types(prog
);
4321 if (!prog
->LinkStatus
)
4324 resize_tes_inputs(ctx
, prog
);
4326 /* Validate the inputs of each stage with the output of the preceding
4330 for (unsigned i
= prev
+ 1; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4331 if (prog
->_LinkedShaders
[i
] == NULL
)
4334 validate_interstage_inout_blocks(prog
, prog
->_LinkedShaders
[prev
],
4335 prog
->_LinkedShaders
[i
]);
4336 if (!prog
->LinkStatus
)
4339 cross_validate_outputs_to_inputs(prog
,
4340 prog
->_LinkedShaders
[prev
],
4341 prog
->_LinkedShaders
[i
]);
4342 if (!prog
->LinkStatus
)
4348 /* Cross-validate uniform blocks between shader stages */
4349 validate_interstage_uniform_blocks(prog
, prog
->_LinkedShaders
,
4350 MESA_SHADER_STAGES
);
4351 if (!prog
->LinkStatus
)
4354 for (unsigned int i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4355 if (prog
->_LinkedShaders
[i
] != NULL
)
4356 lower_named_interface_blocks(mem_ctx
, prog
->_LinkedShaders
[i
]);
4359 /* Implement the GLSL 1.30+ rule for discard vs infinite loops Do
4360 * it before optimization because we want most of the checks to get
4361 * dropped thanks to constant propagation.
4363 * This rule also applies to GLSL ES 3.00.
4365 if (max_version
>= (is_es_prog
? 300 : 130)) {
4366 struct gl_shader
*sh
= prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
];
4368 lower_discard_flow(sh
->ir
);
4372 if (prog
->SeparateShader
)
4373 disable_varying_optimizations_for_sso(prog
);
4375 if (!interstage_cross_validate_uniform_blocks(prog
))
4378 /* Do common optimization before assigning storage for attributes,
4379 * uniforms, and varyings. Later optimization could possibly make
4380 * some of that unused.
4382 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4383 if (prog
->_LinkedShaders
[i
] == NULL
)
4386 detect_recursion_linked(prog
, prog
->_LinkedShaders
[i
]->ir
);
4387 if (!prog
->LinkStatus
)
4390 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerClipDistance
) {
4391 lower_clip_distance(prog
->_LinkedShaders
[i
]);
4394 if (ctx
->Const
.LowerTessLevel
) {
4395 lower_tess_level(prog
->_LinkedShaders
[i
]);
4398 while (do_common_optimization(prog
->_LinkedShaders
[i
]->ir
, true, false,
4399 &ctx
->Const
.ShaderCompilerOptions
[i
],
4400 ctx
->Const
.NativeIntegers
))
4403 lower_const_arrays_to_uniforms(prog
->_LinkedShaders
[i
]->ir
);
4406 /* Validation for special cases where we allow sampler array indexing
4407 * with loop induction variable. This check emits a warning or error
4408 * depending if backend can handle dynamic indexing.
4410 if ((!prog
->IsES
&& prog
->Version
< 130) ||
4411 (prog
->IsES
&& prog
->Version
< 300)) {
4412 if (!validate_sampler_array_indexing(ctx
, prog
))
4416 /* Check and validate stream emissions in geometry shaders */
4417 validate_geometry_shader_emissions(ctx
, prog
);
4419 /* Mark all generic shader inputs and outputs as unpaired. */
4420 for (unsigned i
= MESA_SHADER_VERTEX
; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4421 if (prog
->_LinkedShaders
[i
] != NULL
) {
4422 link_invalidate_variable_locations(prog
->_LinkedShaders
[i
]->ir
);
4427 for (unsigned i
= prev
+ 1; i
<= MESA_SHADER_FRAGMENT
; i
++) {
4428 if (prog
->_LinkedShaders
[i
] == NULL
)
4431 match_explicit_outputs_to_inputs(prog
, prog
->_LinkedShaders
[prev
],
4432 prog
->_LinkedShaders
[i
]);
4436 if (!assign_attribute_or_color_locations(prog
, &ctx
->Const
,
4437 MESA_SHADER_VERTEX
)) {
4441 if (!assign_attribute_or_color_locations(prog
, &ctx
->Const
,
4442 MESA_SHADER_FRAGMENT
)) {
4446 if (num_tfeedback_decls
!= 0) {
4447 /* From GL_EXT_transform_feedback:
4448 * A program will fail to link if:
4450 * * the <count> specified by TransformFeedbackVaryingsEXT is
4451 * non-zero, but the program object has no vertex or geometry
4454 if (first
== MESA_SHADER_FRAGMENT
) {
4455 linker_error(prog
, "Transform feedback varyings specified, but "
4456 "no vertex or geometry shader is present.\n");
4460 tfeedback_decls
= ralloc_array(mem_ctx
, tfeedback_decl
,
4461 prog
->TransformFeedback
.NumVarying
);
4462 if (!parse_tfeedback_decls(ctx
, prog
, mem_ctx
, num_tfeedback_decls
,
4463 prog
->TransformFeedback
.VaryingNames
,
4468 /* Linking the stages in the opposite order (from fragment to vertex)
4469 * ensures that inter-shader outputs written to in an earlier stage are
4470 * eliminated if they are (transitively) not used in a later stage.
4474 if (first
< MESA_SHADER_FRAGMENT
) {
4475 gl_shader
*const sh
= prog
->_LinkedShaders
[last
];
4477 if (first
!= MESA_SHADER_VERTEX
) {
4478 /* There was no vertex shader, but we still have to assign varying
4479 * locations for use by tessellation/geometry shader inputs in SSO.
4481 * If the shader is not separable (i.e., prog->SeparateShader is
4482 * false), linking will have already failed when first is not
4483 * MESA_SHADER_VERTEX.
4485 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4486 NULL
, prog
->_LinkedShaders
[first
],
4487 num_tfeedback_decls
, tfeedback_decls
))
4491 if (last
!= MESA_SHADER_FRAGMENT
&&
4492 (num_tfeedback_decls
!= 0 || prog
->SeparateShader
)) {
4493 /* There was no fragment shader, but we still have to assign varying
4494 * locations for use by transform feedback.
4496 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4498 num_tfeedback_decls
, tfeedback_decls
))
4502 do_dead_builtin_varyings(ctx
, sh
, NULL
,
4503 num_tfeedback_decls
, tfeedback_decls
);
4505 remove_unused_shader_inputs_and_outputs(prog
->SeparateShader
, sh
,
4508 else if (first
== MESA_SHADER_FRAGMENT
) {
4509 /* If the program only contains a fragment shader...
4511 gl_shader
*const sh
= prog
->_LinkedShaders
[first
];
4513 do_dead_builtin_varyings(ctx
, NULL
, sh
,
4514 num_tfeedback_decls
, tfeedback_decls
);
4516 if (prog
->SeparateShader
) {
4517 if (!assign_varying_locations(ctx
, mem_ctx
, prog
,
4518 NULL
/* producer */,
4520 0 /* num_tfeedback_decls */,
4521 NULL
/* tfeedback_decls */))
4524 remove_unused_shader_inputs_and_outputs(false, sh
,
4530 for (int i
= next
- 1; i
>= 0; i
--) {
4531 if (prog
->_LinkedShaders
[i
] == NULL
)
4534 gl_shader
*const sh_i
= prog
->_LinkedShaders
[i
];
4535 gl_shader
*const sh_next
= prog
->_LinkedShaders
[next
];
4537 if (!assign_varying_locations(ctx
, mem_ctx
, prog
, sh_i
, sh_next
,
4538 next
== MESA_SHADER_FRAGMENT
? num_tfeedback_decls
: 0,
4542 do_dead_builtin_varyings(ctx
, sh_i
, sh_next
,
4543 next
== MESA_SHADER_FRAGMENT
? num_tfeedback_decls
: 0,
4546 /* This must be done after all dead varyings are eliminated. */
4547 if (!check_against_output_limit(ctx
, prog
, sh_i
))
4549 if (!check_against_input_limit(ctx
, prog
, sh_next
))
4555 if (!store_tfeedback_info(ctx
, prog
, num_tfeedback_decls
, tfeedback_decls
))
4558 update_array_sizes(prog
);
4559 link_assign_uniform_locations(prog
, ctx
->Const
.UniformBooleanTrue
);
4560 link_assign_atomic_counter_resources(ctx
, prog
);
4561 store_fragdepth_layout(prog
);
4563 link_calculate_subroutine_compat(prog
);
4564 check_resources(ctx
, prog
);
4565 check_subroutine_resources(prog
);
4566 check_image_resources(ctx
, prog
);
4567 link_check_atomic_counter_resources(ctx
, prog
);
4569 if (!prog
->LinkStatus
)
4572 /* OpenGL ES requires that a vertex shader and a fragment shader both be
4573 * present in a linked program. GL_ARB_ES2_compatibility doesn't say
4574 * anything about shader linking when one of the shaders (vertex or
4575 * fragment shader) is absent. So, the extension shouldn't change the
4576 * behavior specified in GLSL specification.
4578 if (!prog
->SeparateShader
&& ctx
->API
== API_OPENGLES2
) {
4579 /* With ES < 3.1 one needs to have always vertex + fragment shader. */
4580 if (ctx
->Version
< 31) {
4581 if (prog
->_LinkedShaders
[MESA_SHADER_VERTEX
] == NULL
) {
4582 linker_error(prog
, "program lacks a vertex shader\n");
4583 } else if (prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
] == NULL
) {
4584 linker_error(prog
, "program lacks a fragment shader\n");
4587 /* From OpenGL ES 3.1 specification (7.3 Program Objects):
4588 * "Linking can fail for a variety of reasons as specified in the
4589 * OpenGL ES Shading Language Specification, as well as any of the
4590 * following reasons:
4594 * * program contains objects to form either a vertex shader or
4595 * fragment shader, and program is not separable, and does not
4596 * contain objects to form both a vertex shader and fragment
4599 if (!!prog
->_LinkedShaders
[MESA_SHADER_VERTEX
] ^
4600 !!prog
->_LinkedShaders
[MESA_SHADER_FRAGMENT
]) {
4601 linker_error(prog
, "Program needs to contain both vertex and "
4602 "fragment shaders.\n");
4607 /* Split BufferInterfaceBlocks into UniformBlocks and ShaderStorageBlocks
4608 * for gl_shader_program and gl_shader, so that drivers that need separate
4609 * index spaces for each set can have that.
4611 for (unsigned i
= MESA_SHADER_VERTEX
; i
< MESA_SHADER_STAGES
; i
++) {
4612 if (prog
->_LinkedShaders
[i
] != NULL
) {
4613 gl_shader
*sh
= prog
->_LinkedShaders
[i
];
4614 split_ubos_and_ssbos(sh
,
4615 sh
->BufferInterfaceBlocks
,
4616 sh
->NumBufferInterfaceBlocks
,
4618 &sh
->NumUniformBlocks
,
4620 &sh
->ShaderStorageBlocks
,
4621 &sh
->NumShaderStorageBlocks
,
4626 split_ubos_and_ssbos(prog
,
4627 prog
->BufferInterfaceBlocks
,
4628 prog
->NumBufferInterfaceBlocks
,
4629 &prog
->UniformBlocks
,
4630 &prog
->NumUniformBlocks
,
4631 &prog
->UboInterfaceBlockIndex
,
4632 &prog
->ShaderStorageBlocks
,
4633 &prog
->NumShaderStorageBlocks
,
4634 &prog
->SsboInterfaceBlockIndex
);
4636 /* FINISHME: Assign fragment shader output locations. */
4638 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4639 if (prog
->_LinkedShaders
[i
] == NULL
)
4642 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerBufferInterfaceBlocks
)
4643 lower_ubo_reference(prog
->_LinkedShaders
[i
]);
4645 if (ctx
->Const
.ShaderCompilerOptions
[i
].LowerShaderSharedVariables
)
4646 lower_shared_reference(prog
->_LinkedShaders
[i
],
4647 &prog
->Comp
.SharedSize
);
4649 lower_vector_derefs(prog
->_LinkedShaders
[i
]);
4653 for (unsigned i
= 0; i
< MESA_SHADER_STAGES
; i
++) {
4654 free(shader_list
[i
]);
4655 if (prog
->_LinkedShaders
[i
] == NULL
)
4658 /* Do a final validation step to make sure that the IR wasn't
4659 * invalidated by any modifications performed after intrastage linking.
4661 validate_ir_tree(prog
->_LinkedShaders
[i
]->ir
);
4663 /* Retain any live IR, but trash the rest. */
4664 reparent_ir(prog
->_LinkedShaders
[i
]->ir
, prog
->_LinkedShaders
[i
]->ir
);
4666 /* The symbol table in the linked shaders may contain references to
4667 * variables that were removed (e.g., unused uniforms). Since it may
4668 * contain junk, there is no possible valid use. Delete it and set the
4671 delete prog
->_LinkedShaders
[i
]->symbols
;
4672 prog
->_LinkedShaders
[i
]->symbols
= NULL
;
4675 ralloc_free(mem_ctx
);