glsl/linker: Validate IR just before reparenting.
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35  * - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54  * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66
67 #include "main/core.h"
68 #include "glsl_symbol_table.h"
69 #include "glsl_parser_extras.h"
70 #include "ir.h"
71 #include "program.h"
72 #include "program/hash_table.h"
73 #include "linker.h"
74 #include "link_varyings.h"
75 #include "ir_optimization.h"
76 #include "ir_rvalue_visitor.h"
77
78 extern "C" {
79 #include "main/shaderobj.h"
80 #include "main/enums.h"
81 }
82
83 void linker_error(gl_shader_program *, const char *, ...);
84
85 namespace {
86
87 /**
88 * Visitor that determines whether or not a variable is ever written.
89 */
90 class find_assignment_visitor : public ir_hierarchical_visitor {
91 public:
92 find_assignment_visitor(const char *name)
93 : name(name), found(false)
94 {
95 /* empty */
96 }
97
98 virtual ir_visitor_status visit_enter(ir_assignment *ir)
99 {
100 ir_variable *const var = ir->lhs->variable_referenced();
101
102 if (strcmp(name, var->name) == 0) {
103 found = true;
104 return visit_stop;
105 }
106
107 return visit_continue_with_parent;
108 }
109
110 virtual ir_visitor_status visit_enter(ir_call *ir)
111 {
112 exec_list_iterator sig_iter = ir->callee->parameters.iterator();
113 foreach_iter(exec_list_iterator, iter, *ir) {
114 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
115 ir_variable *sig_param = (ir_variable *)sig_iter.get();
116
117 if (sig_param->mode == ir_var_function_out ||
118 sig_param->mode == ir_var_function_inout) {
119 ir_variable *var = param_rval->variable_referenced();
120 if (var && strcmp(name, var->name) == 0) {
121 found = true;
122 return visit_stop;
123 }
124 }
125 sig_iter.next();
126 }
127
128 if (ir->return_deref != NULL) {
129 ir_variable *const var = ir->return_deref->variable_referenced();
130
131 if (strcmp(name, var->name) == 0) {
132 found = true;
133 return visit_stop;
134 }
135 }
136
137 return visit_continue_with_parent;
138 }
139
140 bool variable_found()
141 {
142 return found;
143 }
144
145 private:
146 const char *name; /**< Find writes to a variable with this name. */
147 bool found; /**< Was a write to the variable found? */
148 };
149
150
151 /**
152 * Visitor that determines whether or not a variable is ever read.
153 */
154 class find_deref_visitor : public ir_hierarchical_visitor {
155 public:
156 find_deref_visitor(const char *name)
157 : name(name), found(false)
158 {
159 /* empty */
160 }
161
162 virtual ir_visitor_status visit(ir_dereference_variable *ir)
163 {
164 if (strcmp(this->name, ir->var->name) == 0) {
165 this->found = true;
166 return visit_stop;
167 }
168
169 return visit_continue;
170 }
171
172 bool variable_found() const
173 {
174 return this->found;
175 }
176
177 private:
178 const char *name; /**< Find writes to a variable with this name. */
179 bool found; /**< Was a write to the variable found? */
180 };
181
182
183 class geom_array_resize_visitor : public ir_hierarchical_visitor {
184 public:
185 unsigned num_vertices;
186 gl_shader_program *prog;
187
188 geom_array_resize_visitor(unsigned num_vertices, gl_shader_program *prog)
189 {
190 this->num_vertices = num_vertices;
191 this->prog = prog;
192 }
193
194 virtual ~geom_array_resize_visitor()
195 {
196 /* empty */
197 }
198
199 virtual ir_visitor_status visit(ir_variable *var)
200 {
201 if (!var->type->is_array() || var->mode != ir_var_shader_in)
202 return visit_continue;
203
204 unsigned size = var->type->length;
205
206 /* Generate a link error if the shader has declared this array with an
207 * incorrect size.
208 */
209 if (size && size != this->num_vertices) {
210 linker_error(this->prog, "size of array %s declared as %u, "
211 "but number of input vertices is %u\n",
212 var->name, size, this->num_vertices);
213 return visit_continue;
214 }
215
216 /* Generate a link error if the shader attempts to access an input
217 * array using an index too large for its actual size assigned at link
218 * time.
219 */
220 if (var->max_array_access >= this->num_vertices) {
221 linker_error(this->prog, "geometry shader accesses element %i of "
222 "%s, but only %i input vertices\n",
223 var->max_array_access, var->name, this->num_vertices);
224 return visit_continue;
225 }
226
227 var->type = glsl_type::get_array_instance(var->type->element_type(),
228 this->num_vertices);
229 var->max_array_access = this->num_vertices - 1;
230
231 return visit_continue;
232 }
233
234 /* Dereferences of input variables need to be updated so that their type
235 * matches the newly assigned type of the variable they are accessing. */
236 virtual ir_visitor_status visit(ir_dereference_variable *ir)
237 {
238 ir->type = ir->var->type;
239 return visit_continue;
240 }
241
242 /* Dereferences of 2D input arrays need to be updated so that their type
243 * matches the newly assigned type of the array they are accessing. */
244 virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
245 {
246 const glsl_type *const vt = ir->array->type;
247 if (vt->is_array())
248 ir->type = vt->element_type();
249 return visit_continue;
250 }
251 };
252
253
254 /**
255 * Visitor that determines whether or not a shader uses ir_end_primitive.
256 */
257 class find_end_primitive_visitor : public ir_hierarchical_visitor {
258 public:
259 find_end_primitive_visitor()
260 : found(false)
261 {
262 /* empty */
263 }
264
265 virtual ir_visitor_status visit(ir_end_primitive *)
266 {
267 found = true;
268 return visit_stop;
269 }
270
271 bool end_primitive_found()
272 {
273 return found;
274 }
275
276 private:
277 bool found;
278 };
279
280 } /* anonymous namespace */
281
282 void
283 linker_error(gl_shader_program *prog, const char *fmt, ...)
284 {
285 va_list ap;
286
287 ralloc_strcat(&prog->InfoLog, "error: ");
288 va_start(ap, fmt);
289 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
290 va_end(ap);
291
292 prog->LinkStatus = false;
293 }
294
295
296 void
297 linker_warning(gl_shader_program *prog, const char *fmt, ...)
298 {
299 va_list ap;
300
301 ralloc_strcat(&prog->InfoLog, "error: ");
302 va_start(ap, fmt);
303 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
304 va_end(ap);
305
306 }
307
308
309 /**
310 * Given a string identifying a program resource, break it into a base name
311 * and an optional array index in square brackets.
312 *
313 * If an array index is present, \c out_base_name_end is set to point to the
314 * "[" that precedes the array index, and the array index itself is returned
315 * as a long.
316 *
317 * If no array index is present (or if the array index is negative or
318 * mal-formed), \c out_base_name_end, is set to point to the null terminator
319 * at the end of the input string, and -1 is returned.
320 *
321 * Only the final array index is parsed; if the string contains other array
322 * indices (or structure field accesses), they are left in the base name.
323 *
324 * No attempt is made to check that the base name is properly formed;
325 * typically the caller will look up the base name in a hash table, so
326 * ill-formed base names simply turn into hash table lookup failures.
327 */
328 long
329 parse_program_resource_name(const GLchar *name,
330 const GLchar **out_base_name_end)
331 {
332 /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
333 *
334 * "When an integer array element or block instance number is part of
335 * the name string, it will be specified in decimal form without a "+"
336 * or "-" sign or any extra leading zeroes. Additionally, the name
337 * string will not include white space anywhere in the string."
338 */
339
340 const size_t len = strlen(name);
341 *out_base_name_end = name + len;
342
343 if (len == 0 || name[len-1] != ']')
344 return -1;
345
346 /* Walk backwards over the string looking for a non-digit character. This
347 * had better be the opening bracket for an array index.
348 *
349 * Initially, i specifies the location of the ']'. Since the string may
350 * contain only the ']' charcater, walk backwards very carefully.
351 */
352 unsigned i;
353 for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
354 /* empty */ ;
355
356 if ((i == 0) || name[i-1] != '[')
357 return -1;
358
359 long array_index = strtol(&name[i], NULL, 10);
360 if (array_index < 0)
361 return -1;
362
363 *out_base_name_end = name + (i - 1);
364 return array_index;
365 }
366
367
368 void
369 link_invalidate_variable_locations(exec_list *ir)
370 {
371 foreach_list(node, ir) {
372 ir_variable *const var = ((ir_instruction *) node)->as_variable();
373
374 if (var == NULL)
375 continue;
376
377 /* Only assign locations for variables that lack an explicit location.
378 * Explicit locations are set for all built-in variables, generic vertex
379 * shader inputs (via layout(location=...)), and generic fragment shader
380 * outputs (also via layout(location=...)).
381 */
382 if (!var->explicit_location) {
383 var->location = -1;
384 var->location_frac = 0;
385 }
386
387 /* ir_variable::is_unmatched_generic_inout is used by the linker while
388 * connecting outputs from one stage to inputs of the next stage.
389 *
390 * There are two implicit assumptions here. First, we assume that any
391 * built-in variable (i.e., non-generic in or out) will have
392 * explicit_location set. Second, we assume that any generic in or out
393 * will not have explicit_location set.
394 *
395 * This second assumption will only be valid until
396 * GL_ARB_separate_shader_objects is supported. When that extension is
397 * implemented, this function will need some modifications.
398 */
399 if (!var->explicit_location) {
400 var->is_unmatched_generic_inout = 1;
401 } else {
402 var->is_unmatched_generic_inout = 0;
403 }
404 }
405 }
406
407
408 /**
409 * Set UsesClipDistance and ClipDistanceArraySize based on the given shader.
410 *
411 * Also check for errors based on incorrect usage of gl_ClipVertex and
412 * gl_ClipDistance.
413 *
414 * Return false if an error was reported.
415 */
416 static void
417 analyze_clip_usage(const char *shader_type, struct gl_shader_program *prog,
418 struct gl_shader *shader, GLboolean *UsesClipDistance,
419 GLuint *ClipDistanceArraySize)
420 {
421 *ClipDistanceArraySize = 0;
422
423 if (!prog->IsES && prog->Version >= 130) {
424 /* From section 7.1 (Vertex Shader Special Variables) of the
425 * GLSL 1.30 spec:
426 *
427 * "It is an error for a shader to statically write both
428 * gl_ClipVertex and gl_ClipDistance."
429 *
430 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
431 * gl_ClipVertex nor gl_ClipDistance.
432 */
433 find_assignment_visitor clip_vertex("gl_ClipVertex");
434 find_assignment_visitor clip_distance("gl_ClipDistance");
435
436 clip_vertex.run(shader->ir);
437 clip_distance.run(shader->ir);
438 if (clip_vertex.variable_found() && clip_distance.variable_found()) {
439 linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
440 "and `gl_ClipDistance'\n", shader_type);
441 return;
442 }
443 *UsesClipDistance = clip_distance.variable_found();
444 ir_variable *clip_distance_var =
445 shader->symbols->get_variable("gl_ClipDistance");
446 if (clip_distance_var)
447 *ClipDistanceArraySize = clip_distance_var->type->length;
448 } else {
449 *UsesClipDistance = false;
450 }
451 }
452
453
454 /**
455 * Verify that a vertex shader executable meets all semantic requirements.
456 *
457 * Also sets prog->Vert.UsesClipDistance and prog->Vert.ClipDistanceArraySize
458 * as a side effect.
459 *
460 * \param shader Vertex shader executable to be verified
461 */
462 void
463 validate_vertex_shader_executable(struct gl_shader_program *prog,
464 struct gl_shader *shader)
465 {
466 if (shader == NULL)
467 return;
468
469 /* From the GLSL 1.10 spec, page 48:
470 *
471 * "The variable gl_Position is available only in the vertex
472 * language and is intended for writing the homogeneous vertex
473 * position. All executions of a well-formed vertex shader
474 * executable must write a value into this variable. [...] The
475 * variable gl_Position is available only in the vertex
476 * language and is intended for writing the homogeneous vertex
477 * position. All executions of a well-formed vertex shader
478 * executable must write a value into this variable."
479 *
480 * while in GLSL 1.40 this text is changed to:
481 *
482 * "The variable gl_Position is available only in the vertex
483 * language and is intended for writing the homogeneous vertex
484 * position. It can be written at any time during shader
485 * execution. It may also be read back by a vertex shader
486 * after being written. This value will be used by primitive
487 * assembly, clipping, culling, and other fixed functionality
488 * operations, if present, that operate on primitives after
489 * vertex processing has occurred. Its value is undefined if
490 * the vertex shader executable does not write gl_Position."
491 *
492 * GLSL ES 3.00 is similar to GLSL 1.40--failing to write to gl_Position is
493 * not an error.
494 */
495 if (prog->Version < (prog->IsES ? 300 : 140)) {
496 find_assignment_visitor find("gl_Position");
497 find.run(shader->ir);
498 if (!find.variable_found()) {
499 linker_error(prog, "vertex shader does not write to `gl_Position'\n");
500 return;
501 }
502 }
503
504 analyze_clip_usage("vertex", prog, shader, &prog->Vert.UsesClipDistance,
505 &prog->Vert.ClipDistanceArraySize);
506 }
507
508
509 /**
510 * Verify that a fragment shader executable meets all semantic requirements
511 *
512 * \param shader Fragment shader executable to be verified
513 */
514 void
515 validate_fragment_shader_executable(struct gl_shader_program *prog,
516 struct gl_shader *shader)
517 {
518 if (shader == NULL)
519 return;
520
521 find_assignment_visitor frag_color("gl_FragColor");
522 find_assignment_visitor frag_data("gl_FragData");
523
524 frag_color.run(shader->ir);
525 frag_data.run(shader->ir);
526
527 if (frag_color.variable_found() && frag_data.variable_found()) {
528 linker_error(prog, "fragment shader writes to both "
529 "`gl_FragColor' and `gl_FragData'\n");
530 }
531 }
532
533 /**
534 * Verify that a geometry shader executable meets all semantic requirements
535 *
536 * Also sets prog->Geom.VerticesIn, prog->Geom.UsesClipDistance, and
537 * prog->Geom.ClipDistanceArraySize as a side effect.
538 *
539 * \param shader Geometry shader executable to be verified
540 */
541 void
542 validate_geometry_shader_executable(struct gl_shader_program *prog,
543 struct gl_shader *shader)
544 {
545 if (shader == NULL)
546 return;
547
548 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
549 prog->Geom.VerticesIn = num_vertices;
550
551 analyze_clip_usage("geometry", prog, shader, &prog->Geom.UsesClipDistance,
552 &prog->Geom.ClipDistanceArraySize);
553
554 find_end_primitive_visitor end_primitive;
555 end_primitive.run(shader->ir);
556 prog->Geom.UsesEndPrimitive = end_primitive.end_primitive_found();
557 }
558
559
560 /**
561 * Perform validation of global variables used across multiple shaders
562 */
563 void
564 cross_validate_globals(struct gl_shader_program *prog,
565 struct gl_shader **shader_list,
566 unsigned num_shaders,
567 bool uniforms_only)
568 {
569 /* Examine all of the uniforms in all of the shaders and cross validate
570 * them.
571 */
572 glsl_symbol_table variables;
573 for (unsigned i = 0; i < num_shaders; i++) {
574 if (shader_list[i] == NULL)
575 continue;
576
577 foreach_list(node, shader_list[i]->ir) {
578 ir_variable *const var = ((ir_instruction *) node)->as_variable();
579
580 if (var == NULL)
581 continue;
582
583 if (uniforms_only && (var->mode != ir_var_uniform))
584 continue;
585
586 /* Don't cross validate temporaries that are at global scope. These
587 * will eventually get pulled into the shaders 'main'.
588 */
589 if (var->mode == ir_var_temporary)
590 continue;
591
592 /* If a global with this name has already been seen, verify that the
593 * new instance has the same type. In addition, if the globals have
594 * initializers, the values of the initializers must be the same.
595 */
596 ir_variable *const existing = variables.get_variable(var->name);
597 if (existing != NULL) {
598 if (var->type != existing->type) {
599 /* Consider the types to be "the same" if both types are arrays
600 * of the same type and one of the arrays is implicitly sized.
601 * In addition, set the type of the linked variable to the
602 * explicitly sized array.
603 */
604 if (var->type->is_array()
605 && existing->type->is_array()
606 && (var->type->fields.array == existing->type->fields.array)
607 && ((var->type->length == 0)
608 || (existing->type->length == 0))) {
609 if (var->type->length != 0) {
610 existing->type = var->type;
611 }
612 } else {
613 linker_error(prog, "%s `%s' declared as type "
614 "`%s' and type `%s'\n",
615 mode_string(var),
616 var->name, var->type->name,
617 existing->type->name);
618 return;
619 }
620 }
621
622 if (var->explicit_location) {
623 if (existing->explicit_location
624 && (var->location != existing->location)) {
625 linker_error(prog, "explicit locations for %s "
626 "`%s' have differing values\n",
627 mode_string(var), var->name);
628 return;
629 }
630
631 existing->location = var->location;
632 existing->explicit_location = true;
633 }
634
635 /* From the GLSL 4.20 specification:
636 * "A link error will result if two compilation units in a program
637 * specify different integer-constant bindings for the same
638 * opaque-uniform name. However, it is not an error to specify a
639 * binding on some but not all declarations for the same name"
640 */
641 if (var->explicit_binding) {
642 if (existing->explicit_binding &&
643 var->binding != existing->binding) {
644 linker_error(prog, "explicit bindings for %s "
645 "`%s' have differing values\n",
646 mode_string(var), var->name);
647 return;
648 }
649
650 existing->binding = var->binding;
651 existing->explicit_binding = true;
652 }
653
654 if (var->type->contains_atomic() &&
655 var->atomic.offset != existing->atomic.offset) {
656 linker_error(prog, "offset specifications for %s "
657 "`%s' have differing values\n",
658 mode_string(var), var->name);
659 return;
660 }
661
662 /* Validate layout qualifiers for gl_FragDepth.
663 *
664 * From the AMD/ARB_conservative_depth specs:
665 *
666 * "If gl_FragDepth is redeclared in any fragment shader in a
667 * program, it must be redeclared in all fragment shaders in
668 * that program that have static assignments to
669 * gl_FragDepth. All redeclarations of gl_FragDepth in all
670 * fragment shaders in a single program must have the same set
671 * of qualifiers."
672 */
673 if (strcmp(var->name, "gl_FragDepth") == 0) {
674 bool layout_declared = var->depth_layout != ir_depth_layout_none;
675 bool layout_differs =
676 var->depth_layout != existing->depth_layout;
677
678 if (layout_declared && layout_differs) {
679 linker_error(prog,
680 "All redeclarations of gl_FragDepth in all "
681 "fragment shaders in a single program must have "
682 "the same set of qualifiers.");
683 }
684
685 if (var->used && layout_differs) {
686 linker_error(prog,
687 "If gl_FragDepth is redeclared with a layout "
688 "qualifier in any fragment shader, it must be "
689 "redeclared with the same layout qualifier in "
690 "all fragment shaders that have assignments to "
691 "gl_FragDepth");
692 }
693 }
694
695 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
696 *
697 * "If a shared global has multiple initializers, the
698 * initializers must all be constant expressions, and they
699 * must all have the same value. Otherwise, a link error will
700 * result. (A shared global having only one initializer does
701 * not require that initializer to be a constant expression.)"
702 *
703 * Previous to 4.20 the GLSL spec simply said that initializers
704 * must have the same value. In this case of non-constant
705 * initializers, this was impossible to determine. As a result,
706 * no vendor actually implemented that behavior. The 4.20
707 * behavior matches the implemented behavior of at least one other
708 * vendor, so we'll implement that for all GLSL versions.
709 */
710 if (var->constant_initializer != NULL) {
711 if (existing->constant_initializer != NULL) {
712 if (!var->constant_initializer->has_value(existing->constant_initializer)) {
713 linker_error(prog, "initializers for %s "
714 "`%s' have differing values\n",
715 mode_string(var), var->name);
716 return;
717 }
718 } else {
719 /* If the first-seen instance of a particular uniform did not
720 * have an initializer but a later instance does, copy the
721 * initializer to the version stored in the symbol table.
722 */
723 /* FINISHME: This is wrong. The constant_value field should
724 * FINISHME: not be modified! Imagine a case where a shader
725 * FINISHME: without an initializer is linked in two different
726 * FINISHME: programs with shaders that have differing
727 * FINISHME: initializers. Linking with the first will
728 * FINISHME: modify the shader, and linking with the second
729 * FINISHME: will fail.
730 */
731 existing->constant_initializer =
732 var->constant_initializer->clone(ralloc_parent(existing),
733 NULL);
734 }
735 }
736
737 if (var->has_initializer) {
738 if (existing->has_initializer
739 && (var->constant_initializer == NULL
740 || existing->constant_initializer == NULL)) {
741 linker_error(prog,
742 "shared global variable `%s' has multiple "
743 "non-constant initializers.\n",
744 var->name);
745 return;
746 }
747
748 /* Some instance had an initializer, so keep track of that. In
749 * this location, all sorts of initializers (constant or
750 * otherwise) will propagate the existence to the variable
751 * stored in the symbol table.
752 */
753 existing->has_initializer = true;
754 }
755
756 if (existing->invariant != var->invariant) {
757 linker_error(prog, "declarations for %s `%s' have "
758 "mismatching invariant qualifiers\n",
759 mode_string(var), var->name);
760 return;
761 }
762 if (existing->centroid != var->centroid) {
763 linker_error(prog, "declarations for %s `%s' have "
764 "mismatching centroid qualifiers\n",
765 mode_string(var), var->name);
766 return;
767 }
768 } else
769 variables.add_variable(var);
770 }
771 }
772 }
773
774
775 /**
776 * Perform validation of uniforms used across multiple shader stages
777 */
778 void
779 cross_validate_uniforms(struct gl_shader_program *prog)
780 {
781 cross_validate_globals(prog, prog->_LinkedShaders,
782 MESA_SHADER_TYPES, true);
783 }
784
785 /**
786 * Accumulates the array of prog->UniformBlocks and checks that all
787 * definitons of blocks agree on their contents.
788 */
789 static bool
790 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
791 {
792 unsigned max_num_uniform_blocks = 0;
793 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
794 if (prog->_LinkedShaders[i])
795 max_num_uniform_blocks += prog->_LinkedShaders[i]->NumUniformBlocks;
796 }
797
798 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
799 struct gl_shader *sh = prog->_LinkedShaders[i];
800
801 prog->UniformBlockStageIndex[i] = ralloc_array(prog, int,
802 max_num_uniform_blocks);
803 for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
804 prog->UniformBlockStageIndex[i][j] = -1;
805
806 if (sh == NULL)
807 continue;
808
809 for (unsigned int j = 0; j < sh->NumUniformBlocks; j++) {
810 int index = link_cross_validate_uniform_block(prog,
811 &prog->UniformBlocks,
812 &prog->NumUniformBlocks,
813 &sh->UniformBlocks[j]);
814
815 if (index == -1) {
816 linker_error(prog, "uniform block `%s' has mismatching definitions",
817 sh->UniformBlocks[j].Name);
818 return false;
819 }
820
821 prog->UniformBlockStageIndex[i][index] = j;
822 }
823 }
824
825 return true;
826 }
827
828
829 /**
830 * Populates a shaders symbol table with all global declarations
831 */
832 static void
833 populate_symbol_table(gl_shader *sh)
834 {
835 sh->symbols = new(sh) glsl_symbol_table;
836
837 foreach_list(node, sh->ir) {
838 ir_instruction *const inst = (ir_instruction *) node;
839 ir_variable *var;
840 ir_function *func;
841
842 if ((func = inst->as_function()) != NULL) {
843 sh->symbols->add_function(func);
844 } else if ((var = inst->as_variable()) != NULL) {
845 sh->symbols->add_variable(var);
846 }
847 }
848 }
849
850
851 /**
852 * Remap variables referenced in an instruction tree
853 *
854 * This is used when instruction trees are cloned from one shader and placed in
855 * another. These trees will contain references to \c ir_variable nodes that
856 * do not exist in the target shader. This function finds these \c ir_variable
857 * references and replaces the references with matching variables in the target
858 * shader.
859 *
860 * If there is no matching variable in the target shader, a clone of the
861 * \c ir_variable is made and added to the target shader. The new variable is
862 * added to \b both the instruction stream and the symbol table.
863 *
864 * \param inst IR tree that is to be processed.
865 * \param symbols Symbol table containing global scope symbols in the
866 * linked shader.
867 * \param instructions Instruction stream where new variable declarations
868 * should be added.
869 */
870 void
871 remap_variables(ir_instruction *inst, struct gl_shader *target,
872 hash_table *temps)
873 {
874 class remap_visitor : public ir_hierarchical_visitor {
875 public:
876 remap_visitor(struct gl_shader *target,
877 hash_table *temps)
878 {
879 this->target = target;
880 this->symbols = target->symbols;
881 this->instructions = target->ir;
882 this->temps = temps;
883 }
884
885 virtual ir_visitor_status visit(ir_dereference_variable *ir)
886 {
887 if (ir->var->mode == ir_var_temporary) {
888 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
889
890 assert(var != NULL);
891 ir->var = var;
892 return visit_continue;
893 }
894
895 ir_variable *const existing =
896 this->symbols->get_variable(ir->var->name);
897 if (existing != NULL)
898 ir->var = existing;
899 else {
900 ir_variable *copy = ir->var->clone(this->target, NULL);
901
902 this->symbols->add_variable(copy);
903 this->instructions->push_head(copy);
904 ir->var = copy;
905 }
906
907 return visit_continue;
908 }
909
910 private:
911 struct gl_shader *target;
912 glsl_symbol_table *symbols;
913 exec_list *instructions;
914 hash_table *temps;
915 };
916
917 remap_visitor v(target, temps);
918
919 inst->accept(&v);
920 }
921
922
923 /**
924 * Move non-declarations from one instruction stream to another
925 *
926 * The intended usage pattern of this function is to pass the pointer to the
927 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
928 * pointer) for \c last and \c false for \c make_copies on the first
929 * call. Successive calls pass the return value of the previous call for
930 * \c last and \c true for \c make_copies.
931 *
932 * \param instructions Source instruction stream
933 * \param last Instruction after which new instructions should be
934 * inserted in the target instruction stream
935 * \param make_copies Flag selecting whether instructions in \c instructions
936 * should be copied (via \c ir_instruction::clone) into the
937 * target list or moved.
938 *
939 * \return
940 * The new "last" instruction in the target instruction stream. This pointer
941 * is suitable for use as the \c last parameter of a later call to this
942 * function.
943 */
944 exec_node *
945 move_non_declarations(exec_list *instructions, exec_node *last,
946 bool make_copies, gl_shader *target)
947 {
948 hash_table *temps = NULL;
949
950 if (make_copies)
951 temps = hash_table_ctor(0, hash_table_pointer_hash,
952 hash_table_pointer_compare);
953
954 foreach_list_safe(node, instructions) {
955 ir_instruction *inst = (ir_instruction *) node;
956
957 if (inst->as_function())
958 continue;
959
960 ir_variable *var = inst->as_variable();
961 if ((var != NULL) && (var->mode != ir_var_temporary))
962 continue;
963
964 assert(inst->as_assignment()
965 || inst->as_call()
966 || inst->as_if() /* for initializers with the ?: operator */
967 || ((var != NULL) && (var->mode == ir_var_temporary)));
968
969 if (make_copies) {
970 inst = inst->clone(target, NULL);
971
972 if (var != NULL)
973 hash_table_insert(temps, inst, var);
974 else
975 remap_variables(inst, target, temps);
976 } else {
977 inst->remove();
978 }
979
980 last->insert_after(inst);
981 last = inst;
982 }
983
984 if (make_copies)
985 hash_table_dtor(temps);
986
987 return last;
988 }
989
990 /**
991 * Get the function signature for main from a shader
992 */
993 static ir_function_signature *
994 get_main_function_signature(gl_shader *sh)
995 {
996 ir_function *const f = sh->symbols->get_function("main");
997 if (f != NULL) {
998 exec_list void_parameters;
999
1000 /* Look for the 'void main()' signature and ensure that it's defined.
1001 * This keeps the linker from accidentally pick a shader that just
1002 * contains a prototype for main.
1003 *
1004 * We don't have to check for multiple definitions of main (in multiple
1005 * shaders) because that would have already been caught above.
1006 */
1007 ir_function_signature *sig = f->matching_signature(NULL, &void_parameters);
1008 if ((sig != NULL) && sig->is_defined) {
1009 return sig;
1010 }
1011 }
1012
1013 return NULL;
1014 }
1015
1016
1017 /**
1018 * This class is only used in link_intrastage_shaders() below but declaring
1019 * it inside that function leads to compiler warnings with some versions of
1020 * gcc.
1021 */
1022 class array_sizing_visitor : public ir_hierarchical_visitor {
1023 public:
1024 array_sizing_visitor()
1025 : mem_ctx(ralloc_context(NULL)),
1026 unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
1027 hash_table_pointer_compare))
1028 {
1029 }
1030
1031 ~array_sizing_visitor()
1032 {
1033 hash_table_dtor(this->unnamed_interfaces);
1034 ralloc_free(this->mem_ctx);
1035 }
1036
1037 virtual ir_visitor_status visit(ir_variable *var)
1038 {
1039 fixup_type(&var->type, var->max_array_access);
1040 if (var->type->is_interface()) {
1041 if (interface_contains_unsized_arrays(var->type)) {
1042 const glsl_type *new_type =
1043 resize_interface_members(var->type, var->max_ifc_array_access);
1044 var->type = new_type;
1045 var->change_interface_type(new_type);
1046 }
1047 } else if (var->type->is_array() &&
1048 var->type->fields.array->is_interface()) {
1049 if (interface_contains_unsized_arrays(var->type->fields.array)) {
1050 const glsl_type *new_type =
1051 resize_interface_members(var->type->fields.array,
1052 var->max_ifc_array_access);
1053 var->change_interface_type(new_type);
1054 var->type =
1055 glsl_type::get_array_instance(new_type, var->type->length);
1056 }
1057 } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1058 /* Store a pointer to the variable in the unnamed_interfaces
1059 * hashtable.
1060 */
1061 ir_variable **interface_vars = (ir_variable **)
1062 hash_table_find(this->unnamed_interfaces, ifc_type);
1063 if (interface_vars == NULL) {
1064 interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1065 ifc_type->length);
1066 hash_table_insert(this->unnamed_interfaces, interface_vars,
1067 ifc_type);
1068 }
1069 unsigned index = ifc_type->field_index(var->name);
1070 assert(index < ifc_type->length);
1071 assert(interface_vars[index] == NULL);
1072 interface_vars[index] = var;
1073 }
1074 return visit_continue;
1075 }
1076
1077 /**
1078 * For each unnamed interface block that was discovered while running the
1079 * visitor, adjust the interface type to reflect the newly assigned array
1080 * sizes, and fix up the ir_variable nodes to point to the new interface
1081 * type.
1082 */
1083 void fixup_unnamed_interface_types()
1084 {
1085 hash_table_call_foreach(this->unnamed_interfaces,
1086 fixup_unnamed_interface_type, NULL);
1087 }
1088
1089 private:
1090 /**
1091 * If the type pointed to by \c type represents an unsized array, replace
1092 * it with a sized array whose size is determined by max_array_access.
1093 */
1094 static void fixup_type(const glsl_type **type, unsigned max_array_access)
1095 {
1096 if ((*type)->is_unsized_array()) {
1097 *type = glsl_type::get_array_instance((*type)->fields.array,
1098 max_array_access + 1);
1099 assert(*type != NULL);
1100 }
1101 }
1102
1103 /**
1104 * Determine whether the given interface type contains unsized arrays (if
1105 * it doesn't, array_sizing_visitor doesn't need to process it).
1106 */
1107 static bool interface_contains_unsized_arrays(const glsl_type *type)
1108 {
1109 for (unsigned i = 0; i < type->length; i++) {
1110 const glsl_type *elem_type = type->fields.structure[i].type;
1111 if (elem_type->is_unsized_array())
1112 return true;
1113 }
1114 return false;
1115 }
1116
1117 /**
1118 * Create a new interface type based on the given type, with unsized arrays
1119 * replaced by sized arrays whose size is determined by
1120 * max_ifc_array_access.
1121 */
1122 static const glsl_type *
1123 resize_interface_members(const glsl_type *type,
1124 const unsigned *max_ifc_array_access)
1125 {
1126 unsigned num_fields = type->length;
1127 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1128 memcpy(fields, type->fields.structure,
1129 num_fields * sizeof(*fields));
1130 for (unsigned i = 0; i < num_fields; i++) {
1131 fixup_type(&fields[i].type, max_ifc_array_access[i]);
1132 }
1133 glsl_interface_packing packing =
1134 (glsl_interface_packing) type->interface_packing;
1135 const glsl_type *new_ifc_type =
1136 glsl_type::get_interface_instance(fields, num_fields,
1137 packing, type->name);
1138 delete [] fields;
1139 return new_ifc_type;
1140 }
1141
1142 static void fixup_unnamed_interface_type(const void *key, void *data,
1143 void *)
1144 {
1145 const glsl_type *ifc_type = (const glsl_type *) key;
1146 ir_variable **interface_vars = (ir_variable **) data;
1147 unsigned num_fields = ifc_type->length;
1148 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1149 memcpy(fields, ifc_type->fields.structure,
1150 num_fields * sizeof(*fields));
1151 bool interface_type_changed = false;
1152 for (unsigned i = 0; i < num_fields; i++) {
1153 if (interface_vars[i] != NULL &&
1154 fields[i].type != interface_vars[i]->type) {
1155 fields[i].type = interface_vars[i]->type;
1156 interface_type_changed = true;
1157 }
1158 }
1159 if (!interface_type_changed) {
1160 delete [] fields;
1161 return;
1162 }
1163 glsl_interface_packing packing =
1164 (glsl_interface_packing) ifc_type->interface_packing;
1165 const glsl_type *new_ifc_type =
1166 glsl_type::get_interface_instance(fields, num_fields, packing,
1167 ifc_type->name);
1168 delete [] fields;
1169 for (unsigned i = 0; i < num_fields; i++) {
1170 if (interface_vars[i] != NULL)
1171 interface_vars[i]->change_interface_type(new_ifc_type);
1172 }
1173 }
1174
1175 /**
1176 * Memory context used to allocate the data in \c unnamed_interfaces.
1177 */
1178 void *mem_ctx;
1179
1180 /**
1181 * Hash table from const glsl_type * to an array of ir_variable *'s
1182 * pointing to the ir_variables constituting each unnamed interface block.
1183 */
1184 hash_table *unnamed_interfaces;
1185 };
1186
1187 /**
1188 * Performs the cross-validation of geometry shader max_vertices and
1189 * primitive type layout qualifiers for the attached geometry shaders,
1190 * and propagates them to the linked GS and linked shader program.
1191 */
1192 static void
1193 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
1194 struct gl_shader *linked_shader,
1195 struct gl_shader **shader_list,
1196 unsigned num_shaders)
1197 {
1198 linked_shader->Geom.VerticesOut = 0;
1199 linked_shader->Geom.InputType = PRIM_UNKNOWN;
1200 linked_shader->Geom.OutputType = PRIM_UNKNOWN;
1201
1202 /* No in/out qualifiers defined for anything but GLSL 1.50+
1203 * geometry shaders so far.
1204 */
1205 if (linked_shader->Type != GL_GEOMETRY_SHADER || prog->Version < 150)
1206 return;
1207
1208 /* From the GLSL 1.50 spec, page 46:
1209 *
1210 * "All geometry shader output layout declarations in a program
1211 * must declare the same layout and same value for
1212 * max_vertices. There must be at least one geometry output
1213 * layout declaration somewhere in a program, but not all
1214 * geometry shaders (compilation units) are required to
1215 * declare it."
1216 */
1217
1218 for (unsigned i = 0; i < num_shaders; i++) {
1219 struct gl_shader *shader = shader_list[i];
1220
1221 if (shader->Geom.InputType != PRIM_UNKNOWN) {
1222 if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
1223 linked_shader->Geom.InputType != shader->Geom.InputType) {
1224 linker_error(prog, "geometry shader defined with conflicting "
1225 "input types\n");
1226 return;
1227 }
1228 linked_shader->Geom.InputType = shader->Geom.InputType;
1229 }
1230
1231 if (shader->Geom.OutputType != PRIM_UNKNOWN) {
1232 if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
1233 linked_shader->Geom.OutputType != shader->Geom.OutputType) {
1234 linker_error(prog, "geometry shader defined with conflicting "
1235 "output types\n");
1236 return;
1237 }
1238 linked_shader->Geom.OutputType = shader->Geom.OutputType;
1239 }
1240
1241 if (shader->Geom.VerticesOut != 0) {
1242 if (linked_shader->Geom.VerticesOut != 0 &&
1243 linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
1244 linker_error(prog, "geometry shader defined with conflicting "
1245 "output vertex count (%d and %d)\n",
1246 linked_shader->Geom.VerticesOut,
1247 shader->Geom.VerticesOut);
1248 return;
1249 }
1250 linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
1251 }
1252 }
1253
1254 /* Just do the intrastage -> interstage propagation right now,
1255 * since we already know we're in the right type of shader program
1256 * for doing it.
1257 */
1258 if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
1259 linker_error(prog,
1260 "geometry shader didn't declare primitive input type\n");
1261 return;
1262 }
1263 prog->Geom.InputType = linked_shader->Geom.InputType;
1264
1265 if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
1266 linker_error(prog,
1267 "geometry shader didn't declare primitive output type\n");
1268 return;
1269 }
1270 prog->Geom.OutputType = linked_shader->Geom.OutputType;
1271
1272 if (linked_shader->Geom.VerticesOut == 0) {
1273 linker_error(prog,
1274 "geometry shader didn't declare max_vertices\n");
1275 return;
1276 }
1277 prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;
1278 }
1279
1280 /**
1281 * Combine a group of shaders for a single stage to generate a linked shader
1282 *
1283 * \note
1284 * If this function is supplied a single shader, it is cloned, and the new
1285 * shader is returned.
1286 */
1287 static struct gl_shader *
1288 link_intrastage_shaders(void *mem_ctx,
1289 struct gl_context *ctx,
1290 struct gl_shader_program *prog,
1291 struct gl_shader **shader_list,
1292 unsigned num_shaders)
1293 {
1294 struct gl_uniform_block *uniform_blocks = NULL;
1295
1296 /* Check that global variables defined in multiple shaders are consistent.
1297 */
1298 cross_validate_globals(prog, shader_list, num_shaders, false);
1299 if (!prog->LinkStatus)
1300 return NULL;
1301
1302 /* Check that interface blocks defined in multiple shaders are consistent.
1303 */
1304 validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
1305 num_shaders);
1306 if (!prog->LinkStatus)
1307 return NULL;
1308
1309 /* Link up uniform blocks defined within this stage. */
1310 const unsigned num_uniform_blocks =
1311 link_uniform_blocks(mem_ctx, prog, shader_list, num_shaders,
1312 &uniform_blocks);
1313
1314 /* Check that there is only a single definition of each function signature
1315 * across all shaders.
1316 */
1317 for (unsigned i = 0; i < (num_shaders - 1); i++) {
1318 foreach_list(node, shader_list[i]->ir) {
1319 ir_function *const f = ((ir_instruction *) node)->as_function();
1320
1321 if (f == NULL)
1322 continue;
1323
1324 for (unsigned j = i + 1; j < num_shaders; j++) {
1325 ir_function *const other =
1326 shader_list[j]->symbols->get_function(f->name);
1327
1328 /* If the other shader has no function (and therefore no function
1329 * signatures) with the same name, skip to the next shader.
1330 */
1331 if (other == NULL)
1332 continue;
1333
1334 foreach_iter (exec_list_iterator, iter, *f) {
1335 ir_function_signature *sig =
1336 (ir_function_signature *) iter.get();
1337
1338 if (!sig->is_defined || sig->is_builtin())
1339 continue;
1340
1341 ir_function_signature *other_sig =
1342 other->exact_matching_signature(NULL, &sig->parameters);
1343
1344 if ((other_sig != NULL) && other_sig->is_defined
1345 && !other_sig->is_builtin()) {
1346 linker_error(prog, "function `%s' is multiply defined",
1347 f->name);
1348 return NULL;
1349 }
1350 }
1351 }
1352 }
1353 }
1354
1355 /* Find the shader that defines main, and make a clone of it.
1356 *
1357 * Starting with the clone, search for undefined references. If one is
1358 * found, find the shader that defines it. Clone the reference and add
1359 * it to the shader. Repeat until there are no undefined references or
1360 * until a reference cannot be resolved.
1361 */
1362 gl_shader *main = NULL;
1363 for (unsigned i = 0; i < num_shaders; i++) {
1364 if (get_main_function_signature(shader_list[i]) != NULL) {
1365 main = shader_list[i];
1366 break;
1367 }
1368 }
1369
1370 if (main == NULL) {
1371 linker_error(prog, "%s shader lacks `main'\n",
1372 _mesa_glsl_shader_target_name(shader_list[0]->Type));
1373 return NULL;
1374 }
1375
1376 gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
1377 linked->ir = new(linked) exec_list;
1378 clone_ir_list(mem_ctx, linked->ir, main->ir);
1379
1380 linked->UniformBlocks = uniform_blocks;
1381 linked->NumUniformBlocks = num_uniform_blocks;
1382 ralloc_steal(linked, linked->UniformBlocks);
1383
1384 link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
1385
1386 populate_symbol_table(linked);
1387
1388 /* The a pointer to the main function in the final linked shader (i.e., the
1389 * copy of the original shader that contained the main function).
1390 */
1391 ir_function_signature *const main_sig = get_main_function_signature(linked);
1392
1393 /* Move any instructions other than variable declarations or function
1394 * declarations into main.
1395 */
1396 exec_node *insertion_point =
1397 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
1398 linked);
1399
1400 for (unsigned i = 0; i < num_shaders; i++) {
1401 if (shader_list[i] == main)
1402 continue;
1403
1404 insertion_point = move_non_declarations(shader_list[i]->ir,
1405 insertion_point, true, linked);
1406 }
1407
1408 /* Resolve initializers for global variables in the linked shader.
1409 */
1410 unsigned num_linking_shaders = num_shaders;
1411 for (unsigned i = 0; i < num_shaders; i++)
1412 num_linking_shaders += shader_list[i]->num_builtins_to_link;
1413
1414 gl_shader **linking_shaders =
1415 (gl_shader **) calloc(num_linking_shaders, sizeof(gl_shader *));
1416
1417 memcpy(linking_shaders, shader_list,
1418 sizeof(linking_shaders[0]) * num_shaders);
1419
1420 unsigned idx = num_shaders;
1421 for (unsigned i = 0; i < num_shaders; i++) {
1422 memcpy(&linking_shaders[idx], shader_list[i]->builtins_to_link,
1423 sizeof(linking_shaders[0]) * shader_list[i]->num_builtins_to_link);
1424 idx += shader_list[i]->num_builtins_to_link;
1425 }
1426
1427 assert(idx == num_linking_shaders);
1428
1429 if (!link_function_calls(prog, linked, linking_shaders,
1430 num_linking_shaders)) {
1431 ctx->Driver.DeleteShader(ctx, linked);
1432 free(linking_shaders);
1433 return NULL;
1434 }
1435
1436 free(linking_shaders);
1437
1438 /* At this point linked should contain all of the linked IR, so
1439 * validate it to make sure nothing went wrong.
1440 */
1441 validate_ir_tree(linked->ir);
1442
1443 /* Set the size of geometry shader input arrays */
1444 if (linked->Type == GL_GEOMETRY_SHADER) {
1445 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
1446 geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
1447 foreach_iter(exec_list_iterator, iter, *linked->ir) {
1448 ir_instruction *ir = (ir_instruction *)iter.get();
1449 ir->accept(&input_resize_visitor);
1450 }
1451 }
1452
1453 /* Make a pass over all variable declarations to ensure that arrays with
1454 * unspecified sizes have a size specified. The size is inferred from the
1455 * max_array_access field.
1456 */
1457 array_sizing_visitor v;
1458 v.run(linked->ir);
1459 v.fixup_unnamed_interface_types();
1460
1461 return linked;
1462 }
1463
1464 /**
1465 * Update the sizes of linked shader uniform arrays to the maximum
1466 * array index used.
1467 *
1468 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
1469 *
1470 * If one or more elements of an array are active,
1471 * GetActiveUniform will return the name of the array in name,
1472 * subject to the restrictions listed above. The type of the array
1473 * is returned in type. The size parameter contains the highest
1474 * array element index used, plus one. The compiler or linker
1475 * determines the highest index used. There will be only one
1476 * active uniform reported by the GL per uniform array.
1477
1478 */
1479 static void
1480 update_array_sizes(struct gl_shader_program *prog)
1481 {
1482 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1483 if (prog->_LinkedShaders[i] == NULL)
1484 continue;
1485
1486 foreach_list(node, prog->_LinkedShaders[i]->ir) {
1487 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1488
1489 if ((var == NULL) || (var->mode != ir_var_uniform) ||
1490 !var->type->is_array())
1491 continue;
1492
1493 /* GL_ARB_uniform_buffer_object says that std140 uniforms
1494 * will not be eliminated. Since we always do std140, just
1495 * don't resize arrays in UBOs.
1496 *
1497 * Atomic counters are supposed to get deterministic
1498 * locations assigned based on the declaration ordering and
1499 * sizes, array compaction would mess that up.
1500 */
1501 if (var->is_in_uniform_block() || var->type->contains_atomic())
1502 continue;
1503
1504 unsigned int size = var->max_array_access;
1505 for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1506 if (prog->_LinkedShaders[j] == NULL)
1507 continue;
1508
1509 foreach_list(node2, prog->_LinkedShaders[j]->ir) {
1510 ir_variable *other_var = ((ir_instruction *) node2)->as_variable();
1511 if (!other_var)
1512 continue;
1513
1514 if (strcmp(var->name, other_var->name) == 0 &&
1515 other_var->max_array_access > size) {
1516 size = other_var->max_array_access;
1517 }
1518 }
1519 }
1520
1521 if (size + 1 != var->type->length) {
1522 /* If this is a built-in uniform (i.e., it's backed by some
1523 * fixed-function state), adjust the number of state slots to
1524 * match the new array size. The number of slots per array entry
1525 * is not known. It seems safe to assume that the total number of
1526 * slots is an integer multiple of the number of array elements.
1527 * Determine the number of slots per array element by dividing by
1528 * the old (total) size.
1529 */
1530 if (var->num_state_slots > 0) {
1531 var->num_state_slots = (size + 1)
1532 * (var->num_state_slots / var->type->length);
1533 }
1534
1535 var->type = glsl_type::get_array_instance(var->type->fields.array,
1536 size + 1);
1537 /* FINISHME: We should update the types of array
1538 * dereferences of this variable now.
1539 */
1540 }
1541 }
1542 }
1543 }
1544
1545 /**
1546 * Find a contiguous set of available bits in a bitmask.
1547 *
1548 * \param used_mask Bits representing used (1) and unused (0) locations
1549 * \param needed_count Number of contiguous bits needed.
1550 *
1551 * \return
1552 * Base location of the available bits on success or -1 on failure.
1553 */
1554 int
1555 find_available_slots(unsigned used_mask, unsigned needed_count)
1556 {
1557 unsigned needed_mask = (1 << needed_count) - 1;
1558 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
1559
1560 /* The comparison to 32 is redundant, but without it GCC emits "warning:
1561 * cannot optimize possibly infinite loops" for the loop below.
1562 */
1563 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
1564 return -1;
1565
1566 for (int i = 0; i <= max_bit_to_test; i++) {
1567 if ((needed_mask & ~used_mask) == needed_mask)
1568 return i;
1569
1570 needed_mask <<= 1;
1571 }
1572
1573 return -1;
1574 }
1575
1576
1577 /**
1578 * Assign locations for either VS inputs for FS outputs
1579 *
1580 * \param prog Shader program whose variables need locations assigned
1581 * \param target_index Selector for the program target to receive location
1582 * assignmnets. Must be either \c MESA_SHADER_VERTEX or
1583 * \c MESA_SHADER_FRAGMENT.
1584 * \param max_index Maximum number of generic locations. This corresponds
1585 * to either the maximum number of draw buffers or the
1586 * maximum number of generic attributes.
1587 *
1588 * \return
1589 * If locations are successfully assigned, true is returned. Otherwise an
1590 * error is emitted to the shader link log and false is returned.
1591 */
bool
assign_attribute_or_color_locations(gl_shader_program *prog,
                                    unsigned target_index,
                                    unsigned max_index)
{
   /* Mark invalid locations as being used.
    */
   unsigned used_locations = (max_index >= 32)
      ? ~0 : ~((1 << max_index) - 1);

   assert((target_index == MESA_SHADER_VERTEX)
          || (target_index == MESA_SHADER_FRAGMENT));

   gl_shader *const sh = prog->_LinkedShaders[target_index];
   if (sh == NULL)
      return true;

   /* Operate in a total of four passes.
    *
    * 1. Invalidate the location assignments for all vertex shader inputs.
    *
    * 2. Assign locations for inputs that have user-defined (via
    *    glBindVertexAttribLocation) locations and outputs that have
    *    user-defined locations (via glBindFragDataLocation).
    *
    * 3. Sort the attributes without assigned locations by number of slots
    *    required in decreasing order.  Fragmentation caused by attribute
    *    locations assigned by the application may prevent large attributes
    *    from having enough contiguous space.
    *
    * 4. Assign locations to any inputs without assigned locations.
    */

   /* First generic location for this target: VS inputs and FS outputs have
    * different numbering bases.
    */
   const int generic_base = (target_index == MESA_SHADER_VERTEX)
      ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;

   const enum ir_variable_mode direction =
      (target_index == MESA_SHADER_VERTEX)
      ? ir_var_shader_in : ir_var_shader_out;


   /* Temporary storage for the set of attributes that need locations assigned.
    */
   struct temp_attr {
      unsigned slots;
      ir_variable *var;

      /* Used below in the call to qsort. */
      static int compare(const void *a, const void *b)
      {
         const temp_attr *const l = (const temp_attr *) a;
         const temp_attr *const r = (const temp_attr *) b;

         /* Reversed because we want a descending order sort below. */
         return r->slots - l->slots;
      }
   } to_assign[16];

   unsigned num_attr = 0;

   foreach_list(node, sh->ir) {
      ir_variable *const var = ((ir_instruction *) node)->as_variable();

      if ((var == NULL) || (var->mode != (unsigned) direction))
         continue;

      if (var->explicit_location) {
         /* Validate a location assigned in the shader via a layout
          * qualifier against the valid range for this target.
          */
         if ((var->location >= (int)(max_index + generic_base))
             || (var->location < 0)) {
            linker_error(prog,
                         "invalid explicit location %d specified for `%s'\n",
                         (var->location < 0)
                         ? var->location : var->location - generic_base,
                         var->name);
            return false;
         }
      } else if (target_index == MESA_SHADER_VERTEX) {
         unsigned binding;

         /* Apply any glBindAttribLocation binding the application made. */
         if (prog->AttributeBindings->get(binding, var->name)) {
            assert(binding >= VERT_ATTRIB_GENERIC0);
            var->location = binding;
            var->is_unmatched_generic_inout = 0;
         }
      } else if (target_index == MESA_SHADER_FRAGMENT) {
         unsigned binding;
         unsigned index;

         /* Apply any glBindFragDataLocation(Indexed) bindings. */
         if (prog->FragDataBindings->get(binding, var->name)) {
            assert(binding >= FRAG_RESULT_DATA0);
            var->location = binding;
            var->is_unmatched_generic_inout = 0;

            if (prog->FragDataIndexBindings->get(index, var->name)) {
               var->index = index;
            }
         }
      }

      /* If the variable is not a built-in and has a location statically
       * assigned in the shader (presumably via a layout qualifier), make sure
       * that it doesn't collide with other assigned locations.  Otherwise,
       * add it to the list of variables that need linker-assigned locations.
       */
      const unsigned slots = var->type->count_attribute_slots();
      if (var->location != -1) {
         if (var->location >= generic_base && var->index < 1) {
            /* From page 61 of the OpenGL 4.0 spec:
             *
             *     "LinkProgram will fail if the attribute bindings assigned
             *     by BindAttribLocation do not leave not enough space to
             *     assign a location for an active matrix attribute or an
             *     active attribute array, both of which require multiple
             *     contiguous generic attributes."
             *
             * Previous versions of the spec contain similar language but omit
             * the bit about attribute arrays.
             *
             * Page 61 of the OpenGL 4.0 spec also says:
             *
             *     "It is possible for an application to bind more than one
             *     attribute name to the same location. This is referred to as
             *     aliasing. This will only work if only one of the aliased
             *     attributes is active in the executable program, or if no
             *     path through the shader consumes more than one attribute of
             *     a set of attributes aliased to the same location. A link
             *     error can occur if the linker determines that every path
             *     through the shader consumes multiple aliased attributes,
             *     but implementations are not required to generate an error
             *     in this case."
             *
             * These two paragraphs are either somewhat contradictory, or I
             * don't fully understand one or both of them.
             */
            /* FINISHME: The code as currently written does not support
             * FINISHME: attribute location aliasing (see comment above).
             */
            /* Mask representing the contiguous slots that will be used by
             * this attribute.
             */
            const unsigned attr = var->location - generic_base;
            const unsigned use_mask = (1 << slots) - 1;

            /* Generate a link error if the set of bits requested for this
             * attribute overlaps any previously allocated bits.
             */
            if ((~(use_mask << attr) & used_locations) != used_locations) {
               const char *const string = (target_index == MESA_SHADER_VERTEX)
                  ? "vertex shader input" : "fragment shader output";
               linker_error(prog,
                            "insufficient contiguous locations "
                            "available for %s `%s' %d %d %d", string,
                            var->name, used_locations, use_mask, attr);
               return false;
            }

            used_locations |= (use_mask << attr);
         }

         continue;
      }

      to_assign[num_attr].slots = slots;
      to_assign[num_attr].var = var;
      num_attr++;
   }

   /* If all of the attributes were assigned locations by the application (or
    * are built-in attributes with fixed locations), return early.  This should
    * be the common case.
    */
   if (num_attr == 0)
      return true;

   /* Sort by slot count, largest first, so that multi-slot attributes are
    * placed before fragmentation from smaller ones can block them.
    */
   qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);

   if (target_index == MESA_SHADER_VERTEX) {
      /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can
       * only be explicitly assigned by via glBindAttribLocation.  Mark it as
       * reserved to prevent it from being automatically allocated below.
       */
      find_deref_visitor find("gl_Vertex");
      find.run(sh->ir);
      if (find.variable_found())
         used_locations |= (1 << 0);
   }

   for (unsigned i = 0; i < num_attr; i++) {
      /* Mask representing the contiguous slots that will be used by this
       * attribute.
       */
      const unsigned use_mask = (1 << to_assign[i].slots) - 1;

      int location = find_available_slots(used_locations, to_assign[i].slots);

      if (location < 0) {
         const char *const string = (target_index == MESA_SHADER_VERTEX)
            ? "vertex shader input" : "fragment shader output";

         linker_error(prog,
                      "insufficient contiguous locations "
                      "available for %s `%s'",
                      string, to_assign[i].var->name);
         return false;
      }

      to_assign[i].var->location = generic_base + location;
      to_assign[i].var->is_unmatched_generic_inout = 0;
      used_locations |= (use_mask << location);
   }

   return true;
}
1805
1806
1807 /**
1808 * Demote shader inputs and outputs that are not used in other stages
1809 */
1810 void
1811 demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode)
1812 {
1813 foreach_list(node, sh->ir) {
1814 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1815
1816 if ((var == NULL) || (var->mode != int(mode)))
1817 continue;
1818
1819 /* A shader 'in' or 'out' variable is only really an input or output if
1820 * its value is used by other shader stages. This will cause the variable
1821 * to have a location assigned.
1822 */
1823 if (var->is_unmatched_generic_inout) {
1824 var->mode = ir_var_auto;
1825 }
1826 }
1827 }
1828
1829
1830 /**
1831 * Store the gl_FragDepth layout in the gl_shader_program struct.
1832 */
1833 static void
1834 store_fragdepth_layout(struct gl_shader_program *prog)
1835 {
1836 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
1837 return;
1838 }
1839
1840 struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
1841
1842 /* We don't look up the gl_FragDepth symbol directly because if
1843 * gl_FragDepth is not used in the shader, it's removed from the IR.
1844 * However, the symbol won't be removed from the symbol table.
1845 *
1846 * We're only interested in the cases where the variable is NOT removed
1847 * from the IR.
1848 */
1849 foreach_list(node, ir) {
1850 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1851
1852 if (var == NULL || var->mode != ir_var_shader_out) {
1853 continue;
1854 }
1855
1856 if (strcmp(var->name, "gl_FragDepth") == 0) {
1857 switch (var->depth_layout) {
1858 case ir_depth_layout_none:
1859 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
1860 return;
1861 case ir_depth_layout_any:
1862 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
1863 return;
1864 case ir_depth_layout_greater:
1865 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
1866 return;
1867 case ir_depth_layout_less:
1868 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
1869 return;
1870 case ir_depth_layout_unchanged:
1871 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
1872 return;
1873 default:
1874 assert(0);
1875 return;
1876 }
1877 }
1878 }
1879 }
1880
1881 /**
1882 * Validate the resources used by a program versus the implementation limits
1883 */
1884 static void
1885 check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
1886 {
1887 static const char *const shader_names[MESA_SHADER_TYPES] = {
1888 "vertex", "geometry", "fragment"
1889 };
1890
1891 const unsigned max_samplers[MESA_SHADER_TYPES] = {
1892 ctx->Const.VertexProgram.MaxTextureImageUnits,
1893 ctx->Const.GeometryProgram.MaxTextureImageUnits,
1894 ctx->Const.FragmentProgram.MaxTextureImageUnits
1895 };
1896
1897 const unsigned max_default_uniform_components[MESA_SHADER_TYPES] = {
1898 ctx->Const.VertexProgram.MaxUniformComponents,
1899 ctx->Const.GeometryProgram.MaxUniformComponents,
1900 ctx->Const.FragmentProgram.MaxUniformComponents
1901 };
1902
1903 const unsigned max_combined_uniform_components[MESA_SHADER_TYPES] = {
1904 ctx->Const.VertexProgram.MaxCombinedUniformComponents,
1905 ctx->Const.GeometryProgram.MaxCombinedUniformComponents,
1906 ctx->Const.FragmentProgram.MaxCombinedUniformComponents
1907 };
1908
1909 const unsigned max_uniform_blocks[MESA_SHADER_TYPES] = {
1910 ctx->Const.VertexProgram.MaxUniformBlocks,
1911 ctx->Const.GeometryProgram.MaxUniformBlocks,
1912 ctx->Const.FragmentProgram.MaxUniformBlocks
1913 };
1914
1915 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1916 struct gl_shader *sh = prog->_LinkedShaders[i];
1917
1918 if (sh == NULL)
1919 continue;
1920
1921 if (sh->num_samplers > max_samplers[i]) {
1922 linker_error(prog, "Too many %s shader texture samplers",
1923 shader_names[i]);
1924 }
1925
1926 if (sh->num_uniform_components > max_default_uniform_components[i]) {
1927 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1928 linker_warning(prog, "Too many %s shader default uniform block "
1929 "components, but the driver will try to optimize "
1930 "them out; this is non-portable out-of-spec "
1931 "behavior\n",
1932 shader_names[i]);
1933 } else {
1934 linker_error(prog, "Too many %s shader default uniform block "
1935 "components",
1936 shader_names[i]);
1937 }
1938 }
1939
1940 if (sh->num_combined_uniform_components >
1941 max_combined_uniform_components[i]) {
1942 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1943 linker_warning(prog, "Too many %s shader uniform components, "
1944 "but the driver will try to optimize them out; "
1945 "this is non-portable out-of-spec behavior\n",
1946 shader_names[i]);
1947 } else {
1948 linker_error(prog, "Too many %s shader uniform components",
1949 shader_names[i]);
1950 }
1951 }
1952 }
1953
1954 unsigned blocks[MESA_SHADER_TYPES] = {0};
1955 unsigned total_uniform_blocks = 0;
1956
1957 for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
1958 for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1959 if (prog->UniformBlockStageIndex[j][i] != -1) {
1960 blocks[j]++;
1961 total_uniform_blocks++;
1962 }
1963 }
1964
1965 if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
1966 linker_error(prog, "Too many combined uniform blocks (%d/%d)",
1967 prog->NumUniformBlocks,
1968 ctx->Const.MaxCombinedUniformBlocks);
1969 } else {
1970 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1971 if (blocks[i] > max_uniform_blocks[i]) {
1972 linker_error(prog, "Too many %s uniform blocks (%d/%d)",
1973 shader_names[i],
1974 blocks[i],
1975 max_uniform_blocks[i]);
1976 break;
1977 }
1978 }
1979 }
1980 }
1981 }
1982
/**
 * Link all shaders attached to \c prog into a complete program.
 *
 * Performs intrastage linking per shader stage, then interstage
 * validation and resource/location assignment.  On any failure,
 * linker_error() sets prog->LinkStatus to false and control jumps to
 * the common cleanup at \c done.  All temporary linker allocations are
 * parented to \c mem_ctx and released at the end regardless of outcome.
 *
 * \param ctx   GL context supplying limits and driver callbacks
 * \param prog  shader program object to link; receives linked shaders,
 *              info log, and LinkStatus
 */
void
link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
{
   tfeedback_decl *tfeedback_decls = NULL;
   unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;

   void *mem_ctx = ralloc_context(NULL); // temporary linker context

   prog->LinkStatus = true; /* All error paths will set this to false */
   prog->Validated = false;
   prog->_Used = false;

   /* Discard state left over from any previous link of this program:
    * info log, uniform block bookkeeping, and atomic buffer info.
    */
   ralloc_free(prog->InfoLog);
   prog->InfoLog = ralloc_strdup(NULL, "");

   ralloc_free(prog->UniformBlocks);
   prog->UniformBlocks = NULL;
   prog->NumUniformBlocks = 0;
   for (int i = 0; i < MESA_SHADER_TYPES; i++) {
      ralloc_free(prog->UniformBlockStageIndex[i]);
      prog->UniformBlockStageIndex[i] = NULL;
   }

   ralloc_free(prog->AtomicBuffers);
   prog->AtomicBuffers = NULL;
   prog->NumAtomicBuffers = 0;

   /* Separate the shaders into groups based on their type.
    */
   struct gl_shader **vert_shader_list;
   unsigned num_vert_shaders = 0;
   struct gl_shader **frag_shader_list;
   unsigned num_frag_shaders = 0;
   struct gl_shader **geom_shader_list;
   unsigned num_geom_shaders = 0;

   /* Each list is sized for the worst case: every attached shader being
    * of that one type.  These are plain malloc-family allocations and
    * must be released with free() at \c done.
    */
   vert_shader_list = (struct gl_shader **)
      calloc(prog->NumShaders, sizeof(struct gl_shader *));
   frag_shader_list = (struct gl_shader **)
      calloc(prog->NumShaders, sizeof(struct gl_shader *));
   geom_shader_list = (struct gl_shader **)
      calloc(prog->NumShaders, sizeof(struct gl_shader *));

   unsigned min_version = UINT_MAX;
   unsigned max_version = 0;
   /* The ES-ness of the whole program is taken from the first attached
    * shader; every other shader must agree (checked in the loop below).
    */
   const bool is_es_prog =
      (prog->NumShaders > 0 && prog->Shaders[0]->IsES) ? true : false;
   for (unsigned i = 0; i < prog->NumShaders; i++) {
      min_version = MIN2(min_version, prog->Shaders[i]->Version);
      max_version = MAX2(max_version, prog->Shaders[i]->Version);

      if (prog->Shaders[i]->IsES != is_es_prog) {
         linker_error(prog, "all shaders must use same shading "
                      "language version\n");
         goto done;
      }

      switch (prog->Shaders[i]->Type) {
      case GL_VERTEX_SHADER:
         vert_shader_list[num_vert_shaders] = prog->Shaders[i];
         num_vert_shaders++;
         break;
      case GL_FRAGMENT_SHADER:
         frag_shader_list[num_frag_shaders] = prog->Shaders[i];
         num_frag_shaders++;
         break;
      case GL_GEOMETRY_SHADER:
         geom_shader_list[num_geom_shaders] = prog->Shaders[i];
         num_geom_shaders++;
         break;
      }
   }

   /* In desktop GLSL, different shader versions may be linked together.  In
    * GLSL ES, all shader versions must be the same.
    */
   if (is_es_prog && min_version != max_version) {
      linker_error(prog, "all shaders must use same shading "
                   "language version\n");
      goto done;
   }

   prog->Version = max_version;
   prog->IsES = is_es_prog;

   /* Geometry shaders have to be linked with vertex shaders.
    */
   if (num_geom_shaders > 0 && num_vert_shaders == 0) {
      linker_error(prog, "Geometry shader must be linked with "
                   "vertex shader\n");
      goto done;
   }

   /* Release any per-stage linked shaders from a previous link. */
   for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] != NULL)
         ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]);

      prog->_LinkedShaders[i] = NULL;
   }

   /* Link all shaders for a particular stage and validate the result.
    */
   if (num_vert_shaders > 0) {
      gl_shader *const sh =
         link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list,
                                 num_vert_shaders);

      if (!prog->LinkStatus)
         goto done;

      validate_vertex_shader_executable(prog, sh);
      if (!prog->LinkStatus)
         goto done;
      prog->LastClipDistanceArraySize = prog->Vert.ClipDistanceArraySize;

      _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX],
                             sh);
   }

   if (num_frag_shaders > 0) {
      gl_shader *const sh =
         link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list,
                                 num_frag_shaders);

      if (!prog->LinkStatus)
         goto done;

      validate_fragment_shader_executable(prog, sh);
      if (!prog->LinkStatus)
         goto done;

      _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
                             sh);
   }

   if (num_geom_shaders > 0) {
      gl_shader *const sh =
         link_intrastage_shaders(mem_ctx, ctx, prog, geom_shader_list,
                                 num_geom_shaders);

      if (!prog->LinkStatus)
         goto done;

      validate_geometry_shader_executable(prog, sh);
      if (!prog->LinkStatus)
         goto done;
      /* Geometry shader clip-distance size supersedes the vertex shader's
       * as the "last" pre-rasterization stage value.
       */
      prog->LastClipDistanceArraySize = prog->Geom.ClipDistanceArraySize;

      _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_GEOMETRY],
                             sh);
   }

   /* Here begins the inter-stage linking phase.  Some initial validation is
    * performed, then locations are assigned for uniforms, attributes, and
    * varyings.
    */
   cross_validate_uniforms(prog);
   if (!prog->LinkStatus)
      goto done;

   unsigned prev;

   /* Find the first stage that actually has a linked shader. */
   for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
      if (prog->_LinkedShaders[prev] != NULL)
         break;
   }

   /* Validate the inputs of each stage with the output of the preceding
    * stage.
    */
   for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
                                       prog->_LinkedShaders[i]);
      if (!prog->LinkStatus)
         goto done;

      cross_validate_outputs_to_inputs(prog,
                                       prog->_LinkedShaders[prev],
                                       prog->_LinkedShaders[i]);
      if (!prog->LinkStatus)
         goto done;

      prev = i;
   }

   /* Cross-validate uniform blocks between shader stages */
   validate_interstage_uniform_blocks(prog, prog->_LinkedShaders,
                                      MESA_SHADER_TYPES);
   if (!prog->LinkStatus)
      goto done;

   for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] != NULL)
         lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
   }

   /* Implement the GLSL 1.30+ rule for discard vs infinite loops Do
    * it before optimization because we want most of the checks to get
    * dropped thanks to constant propagation.
    *
    * This rule also applies to GLSL ES 3.00.
    */
   if (max_version >= (is_es_prog ? 300 : 130)) {
      struct gl_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
      if (sh) {
         lower_discard_flow(sh->ir);
      }
   }

   if (!interstage_cross_validate_uniform_blocks(prog))
      goto done;

   /* Do common optimization before assigning storage for attributes,
    * uniforms, and varyings.  Later optimization could possibly make
    * some of that unused.
    */
   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
      if (!prog->LinkStatus)
         goto done;

      if (ctx->ShaderCompilerOptions[i].LowerClipDistance) {
         lower_clip_distance(prog->_LinkedShaders[i]);
      }

      unsigned max_unroll = ctx->ShaderCompilerOptions[i].MaxUnrollIterations;

      /* Iterate until the optimization passes reach a fixed point. */
      while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false, max_unroll, &ctx->ShaderCompilerOptions[i]))
         ;
   }

   /* Mark all generic shader inputs and outputs as unpaired. */
   if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
      link_invalidate_variable_locations(
            prog->_LinkedShaders[MESA_SHADER_VERTEX]->ir);
   }
   if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
      link_invalidate_variable_locations(
            prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir);
   }
   if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
      link_invalidate_variable_locations(
            prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir);
   }

   /* FINISHME: The value of the max_attribute_index parameter is
    * FINISHME: implementation dependent based on the value of
    * FINISHME: GL_MAX_VERTEX_ATTRIBS.  GL_MAX_VERTEX_ATTRIBS must be
    * FINISHME: at least 16, so hardcode 16 for now.
    */
   if (!assign_attribute_or_color_locations(prog, MESA_SHADER_VERTEX, 16)) {
      goto done;
   }

   if (!assign_attribute_or_color_locations(prog, MESA_SHADER_FRAGMENT, MAX2(ctx->Const.MaxDrawBuffers, ctx->Const.MaxDualSourceDrawBuffers))) {
      goto done;
   }

   /* Index of the first stage with a linked shader; used below to decide
    * whether the program is fragment-only.
    */
   unsigned first;
   for (first = 0; first < MESA_SHADER_TYPES; first++) {
      if (prog->_LinkedShaders[first] != NULL)
         break;
   }

   if (num_tfeedback_decls != 0) {
      /* From GL_EXT_transform_feedback:
       *   A program will fail to link if:
       *
       *   * the <count> specified by TransformFeedbackVaryingsEXT is
       *     non-zero, but the program object has no vertex or geometry
       *     shader;
       */
      if (first == MESA_SHADER_FRAGMENT) {
         linker_error(prog, "Transform feedback varyings specified, but "
                      "no vertex or geometry shader is present.");
         goto done;
      }

      tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
                                     prog->TransformFeedback.NumVarying);
      if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
                                 prog->TransformFeedback.VaryingNames,
                                 tfeedback_decls))
         goto done;
   }

   /* Linking the stages in the opposite order (from fragment to vertex)
    * ensures that inter-shader outputs written to in an earlier stage are
    * eliminated if they are (transitively) not used in a later stage.
    */
   int last, next;
   for (last = MESA_SHADER_TYPES-1; last >= 0; last--) {
      if (prog->_LinkedShaders[last] != NULL)
         break;
   }

   if (last >= 0 && last < MESA_SHADER_FRAGMENT) {
      gl_shader *const sh = prog->_LinkedShaders[last];

      if (num_tfeedback_decls != 0) {
         /* There was no fragment shader, but we still have to assign varying
          * locations for use by transform feedback.
          */
         if (!assign_varying_locations(ctx, mem_ctx, prog,
                                       sh, NULL,
                                       num_tfeedback_decls, tfeedback_decls,
                                       0))
            goto done;
      }

      do_dead_builtin_varyings(ctx, sh, NULL,
                               num_tfeedback_decls, tfeedback_decls);

      demote_shader_inputs_and_outputs(sh, ir_var_shader_out);

      /* Eliminate code that is now dead due to unused outputs being demoted.
       */
      while (do_dead_code(sh->ir, false))
         ;
   }
   else if (first == MESA_SHADER_FRAGMENT) {
      /* If the program only contains a fragment shader...
       */
      gl_shader *const sh = prog->_LinkedShaders[first];

      do_dead_builtin_varyings(ctx, NULL, sh,
                               num_tfeedback_decls, tfeedback_decls);

      demote_shader_inputs_and_outputs(sh, ir_var_shader_in);

      while (do_dead_code(sh->ir, false))
         ;
   }

   /* Walk stage pairs backwards: (i, next) where `next` is the consumer
    * of `i`'s outputs.  Transform feedback declarations only apply to the
    * pair that feeds the fragment shader.
    */
   next = last;
   for (int i = next - 1; i >= 0; i--) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      gl_shader *const sh_i = prog->_LinkedShaders[i];
      gl_shader *const sh_next = prog->_LinkedShaders[next];
      unsigned gs_input_vertices =
         next == MESA_SHADER_GEOMETRY ? prog->Geom.VerticesIn : 0;

      if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
                next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
                tfeedback_decls, gs_input_vertices))
         goto done;

      do_dead_builtin_varyings(ctx, sh_i, sh_next,
                next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
                tfeedback_decls);

      demote_shader_inputs_and_outputs(sh_i, ir_var_shader_out);
      demote_shader_inputs_and_outputs(sh_next, ir_var_shader_in);

      /* Eliminate code that is now dead due to unused outputs being demoted.
       */
      while (do_dead_code(sh_i->ir, false))
         ;
      while (do_dead_code(sh_next->ir, false))
         ;

      /* This must be done after all dead varyings are eliminated. */
      if (!check_against_output_limit(ctx, prog, sh_i))
         goto done;
      if (!check_against_input_limit(ctx, prog, sh_next))
         goto done;

      next = i;
   }

   if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
      goto done;

   update_array_sizes(prog);
   link_assign_uniform_locations(prog);
   link_assign_atomic_counter_resources(ctx, prog);
   store_fragdepth_layout(prog);

   check_resources(ctx, prog);
   link_check_atomic_counter_resources(ctx, prog);

   if (!prog->LinkStatus)
      goto done;

   /* OpenGL ES requires that a vertex shader and a fragment shader both be
    * present in a linked program.  By checking prog->IsES, we also
    * catch the GL_ARB_ES2_compatibility case.
    */
   if (!prog->InternalSeparateShader &&
       (ctx->API == API_OPENGLES2 || prog->IsES)) {
      if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
         linker_error(prog, "program lacks a vertex shader\n");
      } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
         linker_error(prog, "program lacks a fragment shader\n");
      }
   }

   /* FINISHME: Assign fragment shader output locations. */

done:
   free(vert_shader_list);
   free(frag_shader_list);
   free(geom_shader_list);

   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      /* Do a final validation step to make sure that the IR wasn't
       * invalidated by any modifications performed after intrastage linking.
       */
      validate_ir_tree(prog->_LinkedShaders[i]->ir);

      /* Retain any live IR, but trash the rest. */
      reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);

      /* The symbol table in the linked shaders may contain references to
       * variables that were removed (e.g., unused uniforms).  Since it may
       * contain junk, there is no possible valid use.  Delete it and set the
       * pointer to NULL.
       */
      delete prog->_LinkedShaders[i]->symbols;
      prog->_LinkedShaders[i]->symbols = NULL;
   }

   ralloc_free(mem_ctx);
}