1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
 35  *   - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
 54  * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66
67 #include "main/core.h"
68 #include "glsl_symbol_table.h"
69 #include "glsl_parser_extras.h"
70 #include "ir.h"
71 #include "program.h"
72 #include "program/hash_table.h"
73 #include "linker.h"
74 #include "link_varyings.h"
75 #include "ir_optimization.h"
76 #include "ir_rvalue_visitor.h"
77
78 extern "C" {
79 #include "main/shaderobj.h"
80 #include "main/enums.h"
81 }
82
83 void linker_error(gl_shader_program *, const char *, ...);
84
85 namespace {
86
87 /**
88 * Visitor that determines whether or not a variable is ever written.
89 */
90 class find_assignment_visitor : public ir_hierarchical_visitor {
91 public:
92 find_assignment_visitor(const char *name)
93 : name(name), found(false)
94 {
95 /* empty */
96 }
97
98 virtual ir_visitor_status visit_enter(ir_assignment *ir)
99 {
100 ir_variable *const var = ir->lhs->variable_referenced();
101
102 if (strcmp(name, var->name) == 0) {
103 found = true;
104 return visit_stop;
105 }
106
107 return visit_continue_with_parent;
108 }
109
110 virtual ir_visitor_status visit_enter(ir_call *ir)
111 {
112 exec_list_iterator sig_iter = ir->callee->parameters.iterator();
113 foreach_iter(exec_list_iterator, iter, *ir) {
114 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
115 ir_variable *sig_param = (ir_variable *)sig_iter.get();
116
117 if (sig_param->data.mode == ir_var_function_out ||
118 sig_param->data.mode == ir_var_function_inout) {
119 ir_variable *var = param_rval->variable_referenced();
120 if (var && strcmp(name, var->name) == 0) {
121 found = true;
122 return visit_stop;
123 }
124 }
125 sig_iter.next();
126 }
127
128 if (ir->return_deref != NULL) {
129 ir_variable *const var = ir->return_deref->variable_referenced();
130
131 if (strcmp(name, var->name) == 0) {
132 found = true;
133 return visit_stop;
134 }
135 }
136
137 return visit_continue_with_parent;
138 }
139
140 bool variable_found()
141 {
142 return found;
143 }
144
145 private:
146 const char *name; /**< Find writes to a variable with this name. */
147 bool found; /**< Was a write to the variable found? */
148 };
149
150
151 /**
152 * Visitor that determines whether or not a variable is ever read.
153 */
154 class find_deref_visitor : public ir_hierarchical_visitor {
155 public:
156 find_deref_visitor(const char *name)
157 : name(name), found(false)
158 {
159 /* empty */
160 }
161
162 virtual ir_visitor_status visit(ir_dereference_variable *ir)
163 {
164 if (strcmp(this->name, ir->var->name) == 0) {
165 this->found = true;
166 return visit_stop;
167 }
168
169 return visit_continue;
170 }
171
172 bool variable_found() const
173 {
174 return this->found;
175 }
176
177 private:
 178    const char *name;       /**< Find dereferences of a variable with this name. */
 179    bool found;             /**< Was a dereference of the variable found? */
180 };
181
182
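/* Visitor that resizes unsized geometry shader input arrays to the number of
 * vertices implied by the input primitive type, and reports link errors for
 * arrays declared with a conflicting size or indexed out of range.
 *
 * Illustrative GLSL sketch (not taken from the source): with
 * layout(triangles) in, num_vertices is 3, so a declaration such as
 *
 *    in vec4 vertex_color[];
 *
 * is rewritten to have type vec4[3], while
 *
 *    in vec4 vertex_color[4];
 *
 * triggers the "size of array ... declared as ..." link error below.
 */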
183 class geom_array_resize_visitor : public ir_hierarchical_visitor {
184 public:
185 unsigned num_vertices;
186 gl_shader_program *prog;
187
188 geom_array_resize_visitor(unsigned num_vertices, gl_shader_program *prog)
189 {
190 this->num_vertices = num_vertices;
191 this->prog = prog;
192 }
193
194 virtual ~geom_array_resize_visitor()
195 {
196 /* empty */
197 }
198
199 virtual ir_visitor_status visit(ir_variable *var)
200 {
201 if (!var->type->is_array() || var->data.mode != ir_var_shader_in)
202 return visit_continue;
203
204 unsigned size = var->type->length;
205
206 /* Generate a link error if the shader has declared this array with an
207 * incorrect size.
208 */
209 if (size && size != this->num_vertices) {
210 linker_error(this->prog, "size of array %s declared as %u, "
211 "but number of input vertices is %u\n",
212 var->name, size, this->num_vertices);
213 return visit_continue;
214 }
215
216 /* Generate a link error if the shader attempts to access an input
217 * array using an index too large for its actual size assigned at link
218 * time.
219 */
220 if (var->data.max_array_access >= this->num_vertices) {
221 linker_error(this->prog, "geometry shader accesses element %i of "
222 "%s, but only %i input vertices\n",
223 var->data.max_array_access, var->name, this->num_vertices);
224 return visit_continue;
225 }
226
227 var->type = glsl_type::get_array_instance(var->type->element_type(),
228 this->num_vertices);
229 var->data.max_array_access = this->num_vertices - 1;
230
231 return visit_continue;
232 }
233
234 /* Dereferences of input variables need to be updated so that their type
235 * matches the newly assigned type of the variable they are accessing. */
236 virtual ir_visitor_status visit(ir_dereference_variable *ir)
237 {
238 ir->type = ir->var->type;
239 return visit_continue;
240 }
241
242 /* Dereferences of 2D input arrays need to be updated so that their type
243 * matches the newly assigned type of the array they are accessing. */
244 virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
245 {
246 const glsl_type *const vt = ir->array->type;
247 if (vt->is_array())
248 ir->type = vt->element_type();
249 return visit_continue;
250 }
251 };
252
253
254 /**
255 * Visitor that determines whether or not a shader uses ir_end_primitive.
256 */
257 class find_end_primitive_visitor : public ir_hierarchical_visitor {
258 public:
259 find_end_primitive_visitor()
260 : found(false)
261 {
262 /* empty */
263 }
264
265 virtual ir_visitor_status visit(ir_end_primitive *)
266 {
267 found = true;
268 return visit_stop;
269 }
270
271 bool end_primitive_found()
272 {
273 return found;
274 }
275
276 private:
277 bool found;
278 };
279
280 } /* anonymous namespace */
281
282 void
283 linker_error(gl_shader_program *prog, const char *fmt, ...)
284 {
285 va_list ap;
286
287 ralloc_strcat(&prog->InfoLog, "error: ");
288 va_start(ap, fmt);
289 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
290 va_end(ap);
291
292 prog->LinkStatus = false;
293 }
294
295
296 void
297 linker_warning(gl_shader_program *prog, const char *fmt, ...)
298 {
299 va_list ap;
300
 301    ralloc_strcat(&prog->InfoLog, "warning: ");
302 va_start(ap, fmt);
303 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
304 va_end(ap);
305
306 }
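/* Illustrative usage of the two helpers above (hypothetical message text):
 *
 *    linker_error(prog, "%s shader lacks `main'\n", "vertex");
 *    linker_warning(prog, "no `max_vertices' layout declared\n");
 *
 * linker_error() also sets prog->LinkStatus to false; linker_warning() only
 * appends to the info log.
 */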
307
308
309 /**
310 * Given a string identifying a program resource, break it into a base name
311 * and an optional array index in square brackets.
312 *
313 * If an array index is present, \c out_base_name_end is set to point to the
314 * "[" that precedes the array index, and the array index itself is returned
315 * as a long.
316 *
317 * If no array index is present (or if the array index is negative or
 318  * mal-formed), \c out_base_name_end is set to point to the null terminator
319 * at the end of the input string, and -1 is returned.
320 *
321 * Only the final array index is parsed; if the string contains other array
322 * indices (or structure field accesses), they are left in the base name.
323 *
324 * No attempt is made to check that the base name is properly formed;
325 * typically the caller will look up the base name in a hash table, so
326 * ill-formed base names simply turn into hash table lookup failures.
327 */
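/* Illustrative behaviour (hypothetical inputs, not part of the original
 * source):
 *
 *    const GLchar *end;
 *    parse_program_resource_name("palette[12]", &end);  // returns 12, *end == '['
 *    parse_program_resource_name("palette",     &end);  // returns -1, *end == '\0'
 *    parse_program_resource_name("a[0].b[7]",   &end);  // returns 7, base name "a[0].b"
 */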
328 long
329 parse_program_resource_name(const GLchar *name,
330 const GLchar **out_base_name_end)
331 {
332 /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
333 *
334 * "When an integer array element or block instance number is part of
335 * the name string, it will be specified in decimal form without a "+"
336 * or "-" sign or any extra leading zeroes. Additionally, the name
337 * string will not include white space anywhere in the string."
338 */
339
340 const size_t len = strlen(name);
341 *out_base_name_end = name + len;
342
343 if (len == 0 || name[len-1] != ']')
344 return -1;
345
346 /* Walk backwards over the string looking for a non-digit character. This
347 * had better be the opening bracket for an array index.
348 *
349 * Initially, i specifies the location of the ']'. Since the string may
 350    * contain only the ']' character, walk backwards very carefully.
351 */
352 unsigned i;
353 for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
354 /* empty */ ;
355
356 if ((i == 0) || name[i-1] != '[')
357 return -1;
358
359 long array_index = strtol(&name[i], NULL, 10);
360 if (array_index < 0)
361 return -1;
362
363 *out_base_name_end = name + (i - 1);
364 return array_index;
365 }
366
367
368 void
369 link_invalidate_variable_locations(exec_list *ir)
370 {
371 foreach_list(node, ir) {
372 ir_variable *const var = ((ir_instruction *) node)->as_variable();
373
374 if (var == NULL)
375 continue;
376
377 /* Only assign locations for variables that lack an explicit location.
378 * Explicit locations are set for all built-in variables, generic vertex
379 * shader inputs (via layout(location=...)), and generic fragment shader
380 * outputs (also via layout(location=...)).
381 */
382 if (!var->data.explicit_location) {
383 var->data.location = -1;
384 var->data.location_frac = 0;
385 }
386
387 /* ir_variable::is_unmatched_generic_inout is used by the linker while
388 * connecting outputs from one stage to inputs of the next stage.
389 *
390 * There are two implicit assumptions here. First, we assume that any
391 * built-in variable (i.e., non-generic in or out) will have
392 * explicit_location set. Second, we assume that any generic in or out
393 * will not have explicit_location set.
394 *
395 * This second assumption will only be valid until
396 * GL_ARB_separate_shader_objects is supported. When that extension is
397 * implemented, this function will need some modifications.
398 */
399 if (!var->data.explicit_location) {
400 var->data.is_unmatched_generic_inout = 1;
401 } else {
402 var->data.is_unmatched_generic_inout = 0;
403 }
404 }
405 }
406
407
408 /**
409 * Set UsesClipDistance and ClipDistanceArraySize based on the given shader.
410 *
411 * Also check for errors based on incorrect usage of gl_ClipVertex and
412 * gl_ClipDistance.
413 *
 414  * If incorrect usage is found, a link error is recorded via linker_error().
415 */
416 static void
417 analyze_clip_usage(const char *shader_type, struct gl_shader_program *prog,
418 struct gl_shader *shader, GLboolean *UsesClipDistance,
419 GLuint *ClipDistanceArraySize)
420 {
421 *ClipDistanceArraySize = 0;
422
423 if (!prog->IsES && prog->Version >= 130) {
424 /* From section 7.1 (Vertex Shader Special Variables) of the
425 * GLSL 1.30 spec:
426 *
427 * "It is an error for a shader to statically write both
428 * gl_ClipVertex and gl_ClipDistance."
429 *
430 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
431 * gl_ClipVertex nor gl_ClipDistance.
432 */
433 find_assignment_visitor clip_vertex("gl_ClipVertex");
434 find_assignment_visitor clip_distance("gl_ClipDistance");
435
436 clip_vertex.run(shader->ir);
437 clip_distance.run(shader->ir);
438 if (clip_vertex.variable_found() && clip_distance.variable_found()) {
439 linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
440 "and `gl_ClipDistance'\n", shader_type);
441 return;
442 }
443 *UsesClipDistance = clip_distance.variable_found();
444 ir_variable *clip_distance_var =
445 shader->symbols->get_variable("gl_ClipDistance");
446 if (clip_distance_var)
447 *ClipDistanceArraySize = clip_distance_var->type->length;
448 } else {
449 *UsesClipDistance = false;
450 }
451 }
452
453
454 /**
455 * Verify that a vertex shader executable meets all semantic requirements.
456 *
457 * Also sets prog->Vert.UsesClipDistance and prog->Vert.ClipDistanceArraySize
458 * as a side effect.
459 *
460 * \param shader Vertex shader executable to be verified
461 */
462 void
463 validate_vertex_shader_executable(struct gl_shader_program *prog,
464 struct gl_shader *shader)
465 {
466 if (shader == NULL)
467 return;
468
469 /* From the GLSL 1.10 spec, page 48:
470 *
471 * "The variable gl_Position is available only in the vertex
472 * language and is intended for writing the homogeneous vertex
473 * position. All executions of a well-formed vertex shader
 474     *      executable must write a value into this variable."
479 *
480 * while in GLSL 1.40 this text is changed to:
481 *
482 * "The variable gl_Position is available only in the vertex
483 * language and is intended for writing the homogeneous vertex
484 * position. It can be written at any time during shader
485 * execution. It may also be read back by a vertex shader
486 * after being written. This value will be used by primitive
487 * assembly, clipping, culling, and other fixed functionality
488 * operations, if present, that operate on primitives after
489 * vertex processing has occurred. Its value is undefined if
490 * the vertex shader executable does not write gl_Position."
491 *
492 * GLSL ES 3.00 is similar to GLSL 1.40--failing to write to gl_Position is
493 * not an error.
494 */
495 if (prog->Version < (prog->IsES ? 300 : 140)) {
496 find_assignment_visitor find("gl_Position");
497 find.run(shader->ir);
498 if (!find.variable_found()) {
499 linker_error(prog, "vertex shader does not write to `gl_Position'\n");
500 return;
501 }
502 }
503
504 analyze_clip_usage("vertex", prog, shader, &prog->Vert.UsesClipDistance,
505 &prog->Vert.ClipDistanceArraySize);
506 }
507
508
509 /**
510 * Verify that a fragment shader executable meets all semantic requirements
511 *
512 * \param shader Fragment shader executable to be verified
513 */
514 void
515 validate_fragment_shader_executable(struct gl_shader_program *prog,
516 struct gl_shader *shader)
517 {
518 if (shader == NULL)
519 return;
520
521 find_assignment_visitor frag_color("gl_FragColor");
522 find_assignment_visitor frag_data("gl_FragData");
523
524 frag_color.run(shader->ir);
525 frag_data.run(shader->ir);
526
527 if (frag_color.variable_found() && frag_data.variable_found()) {
528 linker_error(prog, "fragment shader writes to both "
529 "`gl_FragColor' and `gl_FragData'\n");
530 }
531 }
532
533 /**
534 * Verify that a geometry shader executable meets all semantic requirements
535 *
536 * Also sets prog->Geom.VerticesIn, prog->Geom.UsesClipDistance, and
537 * prog->Geom.ClipDistanceArraySize as a side effect.
538 *
539 * \param shader Geometry shader executable to be verified
540 */
541 void
542 validate_geometry_shader_executable(struct gl_shader_program *prog,
543 struct gl_shader *shader)
544 {
545 if (shader == NULL)
546 return;
547
548 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
549 prog->Geom.VerticesIn = num_vertices;
550
551 analyze_clip_usage("geometry", prog, shader, &prog->Geom.UsesClipDistance,
552 &prog->Geom.ClipDistanceArraySize);
553
554 find_end_primitive_visitor end_primitive;
555 end_primitive.run(shader->ir);
556 prog->Geom.UsesEndPrimitive = end_primitive.end_primitive_found();
557 }
558
559
560 /**
561 * Perform validation of global variables used across multiple shaders
562 */
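/* Illustrative GLSL sketch (not from the source): two compilation units in
 * the same stage declaring
 *
 *    // shader A
 *    uniform vec4 color;
 *
 *    // shader B
 *    uniform vec3 color;
 *
 * must produce the "declared as type ... and type ..." link error below,
 * while matching declarations (or one sized and one unsized array of the
 * same element type) are merged into a single global.
 */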
563 void
564 cross_validate_globals(struct gl_shader_program *prog,
565 struct gl_shader **shader_list,
566 unsigned num_shaders,
567 bool uniforms_only)
568 {
569 /* Examine all of the uniforms in all of the shaders and cross validate
570 * them.
571 */
572 glsl_symbol_table variables;
573 for (unsigned i = 0; i < num_shaders; i++) {
574 if (shader_list[i] == NULL)
575 continue;
576
577 foreach_list(node, shader_list[i]->ir) {
578 ir_variable *const var = ((ir_instruction *) node)->as_variable();
579
580 if (var == NULL)
581 continue;
582
583 if (uniforms_only && (var->data.mode != ir_var_uniform))
584 continue;
585
586 /* Don't cross validate temporaries that are at global scope. These
 587          * will eventually get pulled into the shader's 'main'.
588 */
589 if (var->data.mode == ir_var_temporary)
590 continue;
591
592 /* If a global with this name has already been seen, verify that the
593 * new instance has the same type. In addition, if the globals have
594 * initializers, the values of the initializers must be the same.
595 */
596 ir_variable *const existing = variables.get_variable(var->name);
597 if (existing != NULL) {
598 if (var->type != existing->type) {
599 /* Consider the types to be "the same" if both types are arrays
600 * of the same type and one of the arrays is implicitly sized.
601 * In addition, set the type of the linked variable to the
602 * explicitly sized array.
603 */
604 if (var->type->is_array()
605 && existing->type->is_array()
606 && (var->type->fields.array == existing->type->fields.array)
607 && ((var->type->length == 0)
608 || (existing->type->length == 0))) {
609 if (var->type->length != 0) {
610 existing->type = var->type;
611 }
612 } else {
613 linker_error(prog, "%s `%s' declared as type "
614 "`%s' and type `%s'\n",
615 mode_string(var),
616 var->name, var->type->name,
617 existing->type->name);
618 return;
619 }
620 }
621
622 if (var->data.explicit_location) {
623 if (existing->data.explicit_location
624 && (var->data.location != existing->data.location)) {
625 linker_error(prog, "explicit locations for %s "
626 "`%s' have differing values\n",
627 mode_string(var), var->name);
628 return;
629 }
630
631 existing->data.location = var->data.location;
632 existing->data.explicit_location = true;
633 }
634
635 /* From the GLSL 4.20 specification:
636 * "A link error will result if two compilation units in a program
637 * specify different integer-constant bindings for the same
638 * opaque-uniform name. However, it is not an error to specify a
639 * binding on some but not all declarations for the same name"
640 */
641 if (var->data.explicit_binding) {
642 if (existing->data.explicit_binding &&
643 var->data.binding != existing->data.binding) {
644 linker_error(prog, "explicit bindings for %s "
645 "`%s' have differing values\n",
646 mode_string(var), var->name);
647 return;
648 }
649
650 existing->data.binding = var->data.binding;
651 existing->data.explicit_binding = true;
652 }
653
654 if (var->type->contains_atomic() &&
655 var->data.atomic.offset != existing->data.atomic.offset) {
656 linker_error(prog, "offset specifications for %s "
657 "`%s' have differing values\n",
658 mode_string(var), var->name);
659 return;
660 }
661
662 /* Validate layout qualifiers for gl_FragDepth.
663 *
664 * From the AMD/ARB_conservative_depth specs:
665 *
666 * "If gl_FragDepth is redeclared in any fragment shader in a
667 * program, it must be redeclared in all fragment shaders in
668 * that program that have static assignments to
669 * gl_FragDepth. All redeclarations of gl_FragDepth in all
670 * fragment shaders in a single program must have the same set
671 * of qualifiers."
672 */
673 if (strcmp(var->name, "gl_FragDepth") == 0) {
674 bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
675 bool layout_differs =
676 var->data.depth_layout != existing->data.depth_layout;
677
678 if (layout_declared && layout_differs) {
679 linker_error(prog,
680 "All redeclarations of gl_FragDepth in all "
681 "fragment shaders in a single program must have "
682 "the same set of qualifiers.");
683 }
684
685 if (var->data.used && layout_differs) {
686 linker_error(prog,
687 "If gl_FragDepth is redeclared with a layout "
688 "qualifier in any fragment shader, it must be "
689 "redeclared with the same layout qualifier in "
690 "all fragment shaders that have assignments to "
691 "gl_FragDepth");
692 }
693 }
694
695 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
696 *
697 * "If a shared global has multiple initializers, the
698 * initializers must all be constant expressions, and they
699 * must all have the same value. Otherwise, a link error will
700 * result. (A shared global having only one initializer does
701 * not require that initializer to be a constant expression.)"
702 *
703 * Previous to 4.20 the GLSL spec simply said that initializers
704 * must have the same value. In this case of non-constant
705 * initializers, this was impossible to determine. As a result,
706 * no vendor actually implemented that behavior. The 4.20
707 * behavior matches the implemented behavior of at least one other
708 * vendor, so we'll implement that for all GLSL versions.
709 */
710 if (var->constant_initializer != NULL) {
711 if (existing->constant_initializer != NULL) {
712 if (!var->constant_initializer->has_value(existing->constant_initializer)) {
713 linker_error(prog, "initializers for %s "
714 "`%s' have differing values\n",
715 mode_string(var), var->name);
716 return;
717 }
718 } else {
719 /* If the first-seen instance of a particular uniform did not
720 * have an initializer but a later instance does, copy the
721 * initializer to the version stored in the symbol table.
722 */
723 /* FINISHME: This is wrong. The constant_value field should
724 * FINISHME: not be modified! Imagine a case where a shader
725 * FINISHME: without an initializer is linked in two different
726 * FINISHME: programs with shaders that have differing
727 * FINISHME: initializers. Linking with the first will
728 * FINISHME: modify the shader, and linking with the second
729 * FINISHME: will fail.
730 */
731 existing->constant_initializer =
732 var->constant_initializer->clone(ralloc_parent(existing),
733 NULL);
734 }
735 }
736
737 if (var->data.has_initializer) {
738 if (existing->data.has_initializer
739 && (var->constant_initializer == NULL
740 || existing->constant_initializer == NULL)) {
741 linker_error(prog,
742 "shared global variable `%s' has multiple "
743 "non-constant initializers.\n",
744 var->name);
745 return;
746 }
747
 748          /* Some instance had an initializer, so keep track of that.
 749           * Initializers of any kind (constant or otherwise) propagate
 750           * their existence to the variable stored in the symbol
 751           * table.
 752           */
753 existing->data.has_initializer = true;
754 }
755
756 if (existing->data.invariant != var->data.invariant) {
757 linker_error(prog, "declarations for %s `%s' have "
758 "mismatching invariant qualifiers\n",
759 mode_string(var), var->name);
760 return;
761 }
762 if (existing->data.centroid != var->data.centroid) {
763 linker_error(prog, "declarations for %s `%s' have "
764 "mismatching centroid qualifiers\n",
765 mode_string(var), var->name);
766 return;
767 }
768 if (existing->data.sample != var->data.sample) {
 769          linker_error(prog, "declarations for %s `%s' have "
770 "mismatching sample qualifiers\n",
771 mode_string(var), var->name);
772 return;
773 }
774 } else
775 variables.add_variable(var);
776 }
777 }
778 }
779
780
781 /**
782 * Perform validation of uniforms used across multiple shader stages
783 */
784 void
785 cross_validate_uniforms(struct gl_shader_program *prog)
786 {
787 cross_validate_globals(prog, prog->_LinkedShaders,
788 MESA_SHADER_TYPES, true);
789 }
790
791 /**
792 * Accumulates the array of prog->UniformBlocks and checks that all
 793  * definitions of blocks agree on their contents.
794 */
795 static bool
796 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
797 {
798 unsigned max_num_uniform_blocks = 0;
799 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
800 if (prog->_LinkedShaders[i])
801 max_num_uniform_blocks += prog->_LinkedShaders[i]->NumUniformBlocks;
802 }
803
804 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
805 struct gl_shader *sh = prog->_LinkedShaders[i];
806
807 prog->UniformBlockStageIndex[i] = ralloc_array(prog, int,
808 max_num_uniform_blocks);
809 for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
810 prog->UniformBlockStageIndex[i][j] = -1;
811
812 if (sh == NULL)
813 continue;
814
815 for (unsigned int j = 0; j < sh->NumUniformBlocks; j++) {
816 int index = link_cross_validate_uniform_block(prog,
817 &prog->UniformBlocks,
818 &prog->NumUniformBlocks,
819 &sh->UniformBlocks[j]);
820
821 if (index == -1) {
822 linker_error(prog, "uniform block `%s' has mismatching definitions",
823 sh->UniformBlocks[j].Name);
824 return false;
825 }
826
827 prog->UniformBlockStageIndex[i][index] = j;
828 }
829 }
830
831 return true;
832 }
833
834
835 /**
 836  * Populates a shader's symbol table with all global declarations
837 */
838 static void
839 populate_symbol_table(gl_shader *sh)
840 {
841 sh->symbols = new(sh) glsl_symbol_table;
842
843 foreach_list(node, sh->ir) {
844 ir_instruction *const inst = (ir_instruction *) node;
845 ir_variable *var;
846 ir_function *func;
847
848 if ((func = inst->as_function()) != NULL) {
849 sh->symbols->add_function(func);
850 } else if ((var = inst->as_variable()) != NULL) {
851 sh->symbols->add_variable(var);
852 }
853 }
854 }
855
856
857 /**
858 * Remap variables referenced in an instruction tree
859 *
860 * This is used when instruction trees are cloned from one shader and placed in
861 * another. These trees will contain references to \c ir_variable nodes that
862 * do not exist in the target shader. This function finds these \c ir_variable
863 * references and replaces the references with matching variables in the target
864 * shader.
865 *
866 * If there is no matching variable in the target shader, a clone of the
867 * \c ir_variable is made and added to the target shader. The new variable is
868 * added to \b both the instruction stream and the symbol table.
869 *
870 * \param inst IR tree that is to be processed.
 871  * \param target Linked shader whose symbol table and instruction stream
 872  *               receive any newly created variables.
 873  * \param temps  Hash table mapping temporaries in the source shader to
 874  *               their clones in the target shader.
875 */
876 void
877 remap_variables(ir_instruction *inst, struct gl_shader *target,
878 hash_table *temps)
879 {
880 class remap_visitor : public ir_hierarchical_visitor {
881 public:
882 remap_visitor(struct gl_shader *target,
883 hash_table *temps)
884 {
885 this->target = target;
886 this->symbols = target->symbols;
887 this->instructions = target->ir;
888 this->temps = temps;
889 }
890
891 virtual ir_visitor_status visit(ir_dereference_variable *ir)
892 {
893 if (ir->var->data.mode == ir_var_temporary) {
894 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
895
896 assert(var != NULL);
897 ir->var = var;
898 return visit_continue;
899 }
900
901 ir_variable *const existing =
902 this->symbols->get_variable(ir->var->name);
903 if (existing != NULL)
904 ir->var = existing;
905 else {
906 ir_variable *copy = ir->var->clone(this->target, NULL);
907
908 this->symbols->add_variable(copy);
909 this->instructions->push_head(copy);
910 ir->var = copy;
911 }
912
913 return visit_continue;
914 }
915
916 private:
917 struct gl_shader *target;
918 glsl_symbol_table *symbols;
919 exec_list *instructions;
920 hash_table *temps;
921 };
922
923 remap_visitor v(target, temps);
924
925 inst->accept(&v);
926 }
927
928
929 /**
930 * Move non-declarations from one instruction stream to another
931 *
932 * The intended usage pattern of this function is to pass the pointer to the
933 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
934 * pointer) for \c last and \c false for \c make_copies on the first
935 * call. Successive calls pass the return value of the previous call for
936 * \c last and \c true for \c make_copies.
937 *
938 * \param instructions Source instruction stream
939 * \param last Instruction after which new instructions should be
940 * inserted in the target instruction stream
941 * \param make_copies Flag selecting whether instructions in \c instructions
942 * should be copied (via \c ir_instruction::clone) into the
943 * target list or moved.
944 *
945 * \return
946 * The new "last" instruction in the target instruction stream. This pointer
947 * is suitable for use as the \c last parameter of a later call to this
948 * function.
949 */
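/* Usage sketch (mirrors the call pattern in link_intrastage_shaders() below;
 * variable names are illustrative only):
 *
 *    exec_node *last =
 *       move_non_declarations(linked->ir, (exec_node *) &main_sig->body,
 *                             false, linked);
 *    last = move_non_declarations(other_shader->ir, last, true, linked);
 */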
950 exec_node *
951 move_non_declarations(exec_list *instructions, exec_node *last,
952 bool make_copies, gl_shader *target)
953 {
954 hash_table *temps = NULL;
955
956 if (make_copies)
957 temps = hash_table_ctor(0, hash_table_pointer_hash,
958 hash_table_pointer_compare);
959
960 foreach_list_safe(node, instructions) {
961 ir_instruction *inst = (ir_instruction *) node;
962
963 if (inst->as_function())
964 continue;
965
966 ir_variable *var = inst->as_variable();
967 if ((var != NULL) && (var->data.mode != ir_var_temporary))
968 continue;
969
970 assert(inst->as_assignment()
971 || inst->as_call()
972 || inst->as_if() /* for initializers with the ?: operator */
973 || ((var != NULL) && (var->data.mode == ir_var_temporary)));
974
975 if (make_copies) {
976 inst = inst->clone(target, NULL);
977
978 if (var != NULL)
979 hash_table_insert(temps, inst, var);
980 else
981 remap_variables(inst, target, temps);
982 } else {
983 inst->remove();
984 }
985
986 last->insert_after(inst);
987 last = inst;
988 }
989
990 if (make_copies)
991 hash_table_dtor(temps);
992
993 return last;
994 }
995
996 /**
997 * Get the function signature for main from a shader
998 */
999 static ir_function_signature *
1000 get_main_function_signature(gl_shader *sh)
1001 {
1002 ir_function *const f = sh->symbols->get_function("main");
1003 if (f != NULL) {
1004 exec_list void_parameters;
1005
1006 /* Look for the 'void main()' signature and ensure that it's defined.
 1007       * This keeps the linker from accidentally picking a shader that just
1008 * contains a prototype for main.
1009 *
1010 * We don't have to check for multiple definitions of main (in multiple
1011 * shaders) because that would have already been caught above.
1012 */
1013 ir_function_signature *sig = f->matching_signature(NULL, &void_parameters);
1014 if ((sig != NULL) && sig->is_defined) {
1015 return sig;
1016 }
1017 }
1018
1019 return NULL;
1020 }
1021
1022
1023 /**
1024 * This class is only used in link_intrastage_shaders() below but declaring
1025 * it inside that function leads to compiler warnings with some versions of
1026 * gcc.
1027 */
1028 class array_sizing_visitor : public ir_hierarchical_visitor {
1029 public:
1030 array_sizing_visitor()
1031 : mem_ctx(ralloc_context(NULL)),
1032 unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
1033 hash_table_pointer_compare))
1034 {
1035 }
1036
1037 ~array_sizing_visitor()
1038 {
1039 hash_table_dtor(this->unnamed_interfaces);
1040 ralloc_free(this->mem_ctx);
1041 }
1042
1043 virtual ir_visitor_status visit(ir_variable *var)
1044 {
1045 fixup_type(&var->type, var->data.max_array_access);
1046 if (var->type->is_interface()) {
1047 if (interface_contains_unsized_arrays(var->type)) {
1048 const glsl_type *new_type =
1049 resize_interface_members(var->type, var->max_ifc_array_access);
1050 var->type = new_type;
1051 var->change_interface_type(new_type);
1052 }
1053 } else if (var->type->is_array() &&
1054 var->type->fields.array->is_interface()) {
1055 if (interface_contains_unsized_arrays(var->type->fields.array)) {
1056 const glsl_type *new_type =
1057 resize_interface_members(var->type->fields.array,
1058 var->max_ifc_array_access);
1059 var->change_interface_type(new_type);
1060 var->type =
1061 glsl_type::get_array_instance(new_type, var->type->length);
1062 }
1063 } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1064 /* Store a pointer to the variable in the unnamed_interfaces
1065 * hashtable.
1066 */
1067 ir_variable **interface_vars = (ir_variable **)
1068 hash_table_find(this->unnamed_interfaces, ifc_type);
1069 if (interface_vars == NULL) {
1070 interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1071 ifc_type->length);
1072 hash_table_insert(this->unnamed_interfaces, interface_vars,
1073 ifc_type);
1074 }
1075 unsigned index = ifc_type->field_index(var->name);
1076 assert(index < ifc_type->length);
1077 assert(interface_vars[index] == NULL);
1078 interface_vars[index] = var;
1079 }
1080 return visit_continue;
1081 }
1082
1083 /**
1084 * For each unnamed interface block that was discovered while running the
1085 * visitor, adjust the interface type to reflect the newly assigned array
1086 * sizes, and fix up the ir_variable nodes to point to the new interface
1087 * type.
1088 */
1089 void fixup_unnamed_interface_types()
1090 {
1091 hash_table_call_foreach(this->unnamed_interfaces,
1092 fixup_unnamed_interface_type, NULL);
1093 }
1094
1095 private:
1096 /**
1097 * If the type pointed to by \c type represents an unsized array, replace
1098 * it with a sized array whose size is determined by max_array_access.
1099 */
1100 static void fixup_type(const glsl_type **type, unsigned max_array_access)
1101 {
1102 if ((*type)->is_unsized_array()) {
1103 *type = glsl_type::get_array_instance((*type)->fields.array,
1104 max_array_access + 1);
1105 assert(*type != NULL);
1106 }
1107 }
1108
1109 /**
1110 * Determine whether the given interface type contains unsized arrays (if
1111 * it doesn't, array_sizing_visitor doesn't need to process it).
1112 */
1113 static bool interface_contains_unsized_arrays(const glsl_type *type)
1114 {
1115 for (unsigned i = 0; i < type->length; i++) {
1116 const glsl_type *elem_type = type->fields.structure[i].type;
1117 if (elem_type->is_unsized_array())
1118 return true;
1119 }
1120 return false;
1121 }
1122
1123 /**
1124 * Create a new interface type based on the given type, with unsized arrays
1125 * replaced by sized arrays whose size is determined by
1126 * max_ifc_array_access.
1127 */
1128 static const glsl_type *
1129 resize_interface_members(const glsl_type *type,
1130 const unsigned *max_ifc_array_access)
1131 {
1132 unsigned num_fields = type->length;
1133 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1134 memcpy(fields, type->fields.structure,
1135 num_fields * sizeof(*fields));
1136 for (unsigned i = 0; i < num_fields; i++) {
1137 fixup_type(&fields[i].type, max_ifc_array_access[i]);
1138 }
1139 glsl_interface_packing packing =
1140 (glsl_interface_packing) type->interface_packing;
1141 const glsl_type *new_ifc_type =
1142 glsl_type::get_interface_instance(fields, num_fields,
1143 packing, type->name);
1144 delete [] fields;
1145 return new_ifc_type;
1146 }
1147
1148 static void fixup_unnamed_interface_type(const void *key, void *data,
1149 void *)
1150 {
1151 const glsl_type *ifc_type = (const glsl_type *) key;
1152 ir_variable **interface_vars = (ir_variable **) data;
1153 unsigned num_fields = ifc_type->length;
1154 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1155 memcpy(fields, ifc_type->fields.structure,
1156 num_fields * sizeof(*fields));
1157 bool interface_type_changed = false;
1158 for (unsigned i = 0; i < num_fields; i++) {
1159 if (interface_vars[i] != NULL &&
1160 fields[i].type != interface_vars[i]->type) {
1161 fields[i].type = interface_vars[i]->type;
1162 interface_type_changed = true;
1163 }
1164 }
1165 if (!interface_type_changed) {
1166 delete [] fields;
1167 return;
1168 }
1169 glsl_interface_packing packing =
1170 (glsl_interface_packing) ifc_type->interface_packing;
1171 const glsl_type *new_ifc_type =
1172 glsl_type::get_interface_instance(fields, num_fields, packing,
1173 ifc_type->name);
1174 delete [] fields;
1175 for (unsigned i = 0; i < num_fields; i++) {
1176 if (interface_vars[i] != NULL)
1177 interface_vars[i]->change_interface_type(new_ifc_type);
1178 }
1179 }
1180
1181 /**
1182 * Memory context used to allocate the data in \c unnamed_interfaces.
1183 */
1184 void *mem_ctx;
1185
1186 /**
1187 * Hash table from const glsl_type * to an array of ir_variable *'s
1188 * pointing to the ir_variables constituting each unnamed interface block.
1189 */
1190 hash_table *unnamed_interfaces;
1191 };
1192
1193 /**
1194 * Performs the cross-validation of geometry shader max_vertices and
1195 * primitive type layout qualifiers for the attached geometry shaders,
1196 * and propagates them to the linked GS and linked shader program.
1197 */
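/* Illustrative GLSL sketch (not from the source): at least one compilation
 * unit in the stage must contain declarations such as
 *
 *    layout(triangles) in;
 *    layout(triangle_strip, max_vertices = 3) out;
 *
 * Other units may omit them, but any unit that does declare them must agree,
 * otherwise the "conflicting input types" / "conflicting output types" /
 * "conflicting output vertex count" errors below are raised.
 */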
1198 static void
1199 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
1200 struct gl_shader *linked_shader,
1201 struct gl_shader **shader_list,
1202 unsigned num_shaders)
1203 {
1204 linked_shader->Geom.VerticesOut = 0;
1205 linked_shader->Geom.InputType = PRIM_UNKNOWN;
1206 linked_shader->Geom.OutputType = PRIM_UNKNOWN;
1207
1208 /* No in/out qualifiers defined for anything but GLSL 1.50+
1209 * geometry shaders so far.
1210 */
1211 if (linked_shader->Type != GL_GEOMETRY_SHADER || prog->Version < 150)
1212 return;
1213
1214 /* From the GLSL 1.50 spec, page 46:
1215 *
1216 * "All geometry shader output layout declarations in a program
1217 * must declare the same layout and same value for
1218 * max_vertices. There must be at least one geometry output
1219 * layout declaration somewhere in a program, but not all
1220 * geometry shaders (compilation units) are required to
1221 * declare it."
1222 */
1223
1224 for (unsigned i = 0; i < num_shaders; i++) {
1225 struct gl_shader *shader = shader_list[i];
1226
1227 if (shader->Geom.InputType != PRIM_UNKNOWN) {
1228 if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
1229 linked_shader->Geom.InputType != shader->Geom.InputType) {
1230 linker_error(prog, "geometry shader defined with conflicting "
1231 "input types\n");
1232 return;
1233 }
1234 linked_shader->Geom.InputType = shader->Geom.InputType;
1235 }
1236
1237 if (shader->Geom.OutputType != PRIM_UNKNOWN) {
1238 if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
1239 linked_shader->Geom.OutputType != shader->Geom.OutputType) {
1240 linker_error(prog, "geometry shader defined with conflicting "
1241 "output types\n");
1242 return;
1243 }
1244 linked_shader->Geom.OutputType = shader->Geom.OutputType;
1245 }
1246
1247 if (shader->Geom.VerticesOut != 0) {
1248 if (linked_shader->Geom.VerticesOut != 0 &&
1249 linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
1250 linker_error(prog, "geometry shader defined with conflicting "
1251 "output vertex count (%d and %d)\n",
1252 linked_shader->Geom.VerticesOut,
1253 shader->Geom.VerticesOut);
1254 return;
1255 }
1256 linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
1257 }
1258 }
1259
1260 /* Just do the intrastage -> interstage propagation right now,
1261 * since we already know we're in the right type of shader program
1262 * for doing it.
1263 */
1264 if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
1265 linker_error(prog,
1266 "geometry shader didn't declare primitive input type\n");
1267 return;
1268 }
1269 prog->Geom.InputType = linked_shader->Geom.InputType;
1270
1271 if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
1272 linker_error(prog,
1273 "geometry shader didn't declare primitive output type\n");
1274 return;
1275 }
1276 prog->Geom.OutputType = linked_shader->Geom.OutputType;
1277
1278 if (linked_shader->Geom.VerticesOut == 0) {
1279 linker_error(prog,
1280 "geometry shader didn't declare max_vertices\n");
1281 return;
1282 }
1283 prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;
1284 }
1285
1286 /**
1287 * Combine a group of shaders for a single stage to generate a linked shader
1288 *
1289 * \note
1290 * If this function is supplied a single shader, it is cloned, and the new
1291 * shader is returned.
1292 */
1293 static struct gl_shader *
1294 link_intrastage_shaders(void *mem_ctx,
1295 struct gl_context *ctx,
1296 struct gl_shader_program *prog,
1297 struct gl_shader **shader_list,
1298 unsigned num_shaders)
1299 {
1300 struct gl_uniform_block *uniform_blocks = NULL;
1301
1302 /* Check that global variables defined in multiple shaders are consistent.
1303 */
1304 cross_validate_globals(prog, shader_list, num_shaders, false);
1305 if (!prog->LinkStatus)
1306 return NULL;
1307
1308 /* Check that interface blocks defined in multiple shaders are consistent.
1309 */
1310 validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
1311 num_shaders);
1312 if (!prog->LinkStatus)
1313 return NULL;
1314
1315 /* Link up uniform blocks defined within this stage. */
1316 const unsigned num_uniform_blocks =
1317 link_uniform_blocks(mem_ctx, prog, shader_list, num_shaders,
1318 &uniform_blocks);
1319
1320 /* Check that there is only a single definition of each function signature
1321 * across all shaders.
1322 */
1323 for (unsigned i = 0; i < (num_shaders - 1); i++) {
1324 foreach_list(node, shader_list[i]->ir) {
1325 ir_function *const f = ((ir_instruction *) node)->as_function();
1326
1327 if (f == NULL)
1328 continue;
1329
1330 for (unsigned j = i + 1; j < num_shaders; j++) {
1331 ir_function *const other =
1332 shader_list[j]->symbols->get_function(f->name);
1333
1334 /* If the other shader has no function (and therefore no function
1335 * signatures) with the same name, skip to the next shader.
1336 */
1337 if (other == NULL)
1338 continue;
1339
1340 foreach_iter (exec_list_iterator, iter, *f) {
1341 ir_function_signature *sig =
1342 (ir_function_signature *) iter.get();
1343
1344 if (!sig->is_defined || sig->is_builtin())
1345 continue;
1346
1347 ir_function_signature *other_sig =
1348 other->exact_matching_signature(NULL, &sig->parameters);
1349
1350 if ((other_sig != NULL) && other_sig->is_defined
1351 && !other_sig->is_builtin()) {
1352 linker_error(prog, "function `%s' is multiply defined",
1353 f->name);
1354 return NULL;
1355 }
1356 }
1357 }
1358 }
1359 }
1360
1361 /* Find the shader that defines main, and make a clone of it.
1362 *
1363 * Starting with the clone, search for undefined references. If one is
1364 * found, find the shader that defines it. Clone the reference and add
1365 * it to the shader. Repeat until there are no undefined references or
1366 * until a reference cannot be resolved.
1367 */
1368 gl_shader *main = NULL;
1369 for (unsigned i = 0; i < num_shaders; i++) {
1370 if (get_main_function_signature(shader_list[i]) != NULL) {
1371 main = shader_list[i];
1372 break;
1373 }
1374 }
1375
1376 if (main == NULL) {
1377 linker_error(prog, "%s shader lacks `main'\n",
1378 _mesa_glsl_shader_target_name(shader_list[0]->Type));
1379 return NULL;
1380 }
1381
1382 gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
1383 linked->ir = new(linked) exec_list;
1384 clone_ir_list(mem_ctx, linked->ir, main->ir);
1385
1386 linked->UniformBlocks = uniform_blocks;
1387 linked->NumUniformBlocks = num_uniform_blocks;
1388 ralloc_steal(linked, linked->UniformBlocks);
1389
1390 link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
1391
1392 populate_symbol_table(linked);
1393
 1394    /* Get a pointer to the main function in the final linked shader (i.e., the
1395 * copy of the original shader that contained the main function).
1396 */
1397 ir_function_signature *const main_sig = get_main_function_signature(linked);
1398
1399 /* Move any instructions other than variable declarations or function
1400 * declarations into main.
1401 */
1402 exec_node *insertion_point =
1403 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
1404 linked);
1405
1406 for (unsigned i = 0; i < num_shaders; i++) {
1407 if (shader_list[i] == main)
1408 continue;
1409
1410 insertion_point = move_non_declarations(shader_list[i]->ir,
1411 insertion_point, true, linked);
1412 }
1413
1414 /* Check if any shader needs built-in functions. */
1415 bool need_builtins = false;
1416 for (unsigned i = 0; i < num_shaders; i++) {
1417 if (shader_list[i]->uses_builtin_functions) {
1418 need_builtins = true;
1419 break;
1420 }
1421 }
1422
1423 bool ok;
1424 if (need_builtins) {
1425 /* Make a temporary array one larger than shader_list, which will hold
1426 * the built-in function shader as well.
1427 */
1428 gl_shader **linking_shaders = (gl_shader **)
1429 calloc(num_shaders + 1, sizeof(gl_shader *));
1430 memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *));
1431 linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader();
1432
1433 ok = link_function_calls(prog, linked, linking_shaders, num_shaders + 1);
1434
1435 free(linking_shaders);
1436 } else {
1437 ok = link_function_calls(prog, linked, shader_list, num_shaders);
1438 }
1439
1440
1441 if (!ok) {
1442 ctx->Driver.DeleteShader(ctx, linked);
1443 return NULL;
1444 }
1445
1446 /* At this point linked should contain all of the linked IR, so
1447 * validate it to make sure nothing went wrong.
1448 */
1449 validate_ir_tree(linked->ir);
1450
1451 /* Set the size of geometry shader input arrays */
1452 if (linked->Type == GL_GEOMETRY_SHADER) {
1453 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
1454 geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
1455 foreach_iter(exec_list_iterator, iter, *linked->ir) {
1456 ir_instruction *ir = (ir_instruction *)iter.get();
1457 ir->accept(&input_resize_visitor);
1458 }
1459 }
1460
1461 /* Make a pass over all variable declarations to ensure that arrays with
1462 * unspecified sizes have a size specified. The size is inferred from the
1463 * max_array_access field.
1464 */
1465 array_sizing_visitor v;
1466 v.run(linked->ir);
1467 v.fixup_unnamed_interface_types();
1468
1469 return linked;
1470 }
1471
1472 /**
1473 * Update the sizes of linked shader uniform arrays to the maximum
1474 * array index used.
1475 *
1476 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
1477 *
1478 * If one or more elements of an array are active,
1479 * GetActiveUniform will return the name of the array in name,
1480 * subject to the restrictions listed above. The type of the array
1481 * is returned in type. The size parameter contains the highest
1482 * array element index used, plus one. The compiler or linker
1483 * determines the highest index used. There will be only one
1484 * active uniform reported by the GL per uniform array.
 1485     *
1486 */
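/* Illustrative sketch (hypothetical declaration): given
 *
 *    uniform float weights[8];
 *
 * where the highest index statically accessed across all linked stages is 2,
 * the linked uniform's type is shrunk to float[3], and any state slots of a
 * built-in uniform are scaled down proportionally.
 */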
1487 static void
1488 update_array_sizes(struct gl_shader_program *prog)
1489 {
1490 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1491 if (prog->_LinkedShaders[i] == NULL)
1492 continue;
1493
1494 foreach_list(node, prog->_LinkedShaders[i]->ir) {
1495 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1496
1497 if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
1498 !var->type->is_array())
1499 continue;
1500
1501 /* GL_ARB_uniform_buffer_object says that std140 uniforms
1502 * will not be eliminated. Since we always do std140, just
1503 * don't resize arrays in UBOs.
1504 *
1505 * Atomic counters are supposed to get deterministic
1506 * locations assigned based on the declaration ordering and
1507 * sizes, array compaction would mess that up.
1508 */
1509 if (var->is_in_uniform_block() || var->type->contains_atomic())
1510 continue;
1511
1512 unsigned int size = var->data.max_array_access;
1513 for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1514 if (prog->_LinkedShaders[j] == NULL)
1515 continue;
1516
1517 foreach_list(node2, prog->_LinkedShaders[j]->ir) {
1518 ir_variable *other_var = ((ir_instruction *) node2)->as_variable();
1519 if (!other_var)
1520 continue;
1521
1522 if (strcmp(var->name, other_var->name) == 0 &&
1523 other_var->data.max_array_access > size) {
1524 size = other_var->data.max_array_access;
1525 }
1526 }
1527 }
1528
1529 if (size + 1 != var->type->length) {
1530 /* If this is a built-in uniform (i.e., it's backed by some
1531 * fixed-function state), adjust the number of state slots to
1532 * match the new array size. The number of slots per array entry
1533 * is not known. It seems safe to assume that the total number of
1534 * slots is an integer multiple of the number of array elements.
1535 * Determine the number of slots per array element by dividing by
1536 * the old (total) size.
1537 */
1538 if (var->num_state_slots > 0) {
1539 var->num_state_slots = (size + 1)
1540 * (var->num_state_slots / var->type->length);
1541 }
1542
1543 var->type = glsl_type::get_array_instance(var->type->fields.array,
1544 size + 1);
1545 /* FINISHME: We should update the types of array
1546 * dereferences of this variable now.
1547 */
1548 }
1549 }
1550 }
1551 }
1552
1553 /**
1554 * Find a contiguous set of available bits in a bitmask.
1555 *
1556 * \param used_mask Bits representing used (1) and unused (0) locations
1557 * \param needed_count Number of contiguous bits needed.
1558 *
1559 * \return
1560 * Base location of the available bits on success or -1 on failure.
1561 */
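/* Illustrative example (values are hypothetical): with used_mask = 0x0b
 * (bits 0, 1 and 3 already taken) and needed_count = 2, the first position
 * with two contiguous free bits is 4, so find_available_slots(0x0b, 2)
 * returns 4.
 */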
1562 int
1563 find_available_slots(unsigned used_mask, unsigned needed_count)
1564 {
1565 unsigned needed_mask = (1 << needed_count) - 1;
1566 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
1567
1568 /* The comparison to 32 is redundant, but without it GCC emits "warning:
1569 * cannot optimize possibly infinite loops" for the loop below.
1570 */
1571 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
1572 return -1;
1573
1574 for (int i = 0; i <= max_bit_to_test; i++) {
1575 if ((needed_mask & ~used_mask) == needed_mask)
1576 return i;
1577
1578 needed_mask <<= 1;
1579 }
1580
1581 return -1;
1582 }
1583
1584
1585 /**
 1586  * Assign locations for either VS inputs or FS outputs
1587 *
1588 * \param prog Shader program whose variables need locations assigned
1589 * \param target_index Selector for the program target to receive location
 1590  *                     assignments.  Must be either \c MESA_SHADER_VERTEX or
1591 * \c MESA_SHADER_FRAGMENT.
1592 * \param max_index Maximum number of generic locations. This corresponds
1593 * to either the maximum number of draw buffers or the
1594 * maximum number of generic attributes.
1595 *
1596 * \return
1597 * If locations are successfully assigned, true is returned. Otherwise an
1598 * error is emitted to the shader link log and false is returned.
1599 */
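/* Illustrative sketch (hypothetical bindings): a vertex shader input
 *
 *    in mat4 model;          // needs 4 contiguous generic locations
 *
 * bound with glBindAttribLocation(prog, 3, "model") reserves locations 3..6.
 * If another active attribute had already been bound to location 5, the
 * "insufficient contiguous locations" error below would be reported.
 */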
1600 bool
1601 assign_attribute_or_color_locations(gl_shader_program *prog,
1602 unsigned target_index,
1603 unsigned max_index)
1604 {
1605 /* Mark invalid locations as being used.
1606 */
1607 unsigned used_locations = (max_index >= 32)
1608 ? ~0 : ~((1 << max_index) - 1);
1609
1610 assert((target_index == MESA_SHADER_VERTEX)
1611 || (target_index == MESA_SHADER_FRAGMENT));
1612
1613 gl_shader *const sh = prog->_LinkedShaders[target_index];
1614 if (sh == NULL)
1615 return true;
1616
1617 /* Operate in a total of four passes.
1618 *
1619 * 1. Invalidate the location assignments for all vertex shader inputs.
1620 *
1621 * 2. Assign locations for inputs that have user-defined (via
1622 * glBindVertexAttribLocation) locations and outputs that have
1623 * user-defined locations (via glBindFragDataLocation).
1624 *
1625 * 3. Sort the attributes without assigned locations by number of slots
1626 * required in decreasing order. Fragmentation caused by attribute
1627 * locations assigned by the application may prevent large attributes
1628 * from having enough contiguous space.
1629 *
1630 * 4. Assign locations to any inputs without assigned locations.
1631 */
1632
1633 const int generic_base = (target_index == MESA_SHADER_VERTEX)
1634 ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
1635
1636 const enum ir_variable_mode direction =
1637 (target_index == MESA_SHADER_VERTEX)
1638 ? ir_var_shader_in : ir_var_shader_out;
1639
1640
1641 /* Temporary storage for the set of attributes that need locations assigned.
1642 */
1643 struct temp_attr {
1644 unsigned slots;
1645 ir_variable *var;
1646
1647 /* Used below in the call to qsort. */
1648 static int compare(const void *a, const void *b)
1649 {
1650 const temp_attr *const l = (const temp_attr *) a;
1651 const temp_attr *const r = (const temp_attr *) b;
1652
1653 /* Reversed because we want a descending order sort below. */
1654 return r->slots - l->slots;
1655 }
1656 } to_assign[16];
1657
1658 unsigned num_attr = 0;
1659
1660 foreach_list(node, sh->ir) {
1661 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1662
1663 if ((var == NULL) || (var->data.mode != (unsigned) direction))
1664 continue;
1665
1666 if (var->data.explicit_location) {
1667 if ((var->data.location >= (int)(max_index + generic_base))
1668 || (var->data.location < 0)) {
1669 linker_error(prog,
1670 "invalid explicit location %d specified for `%s'\n",
1671 (var->data.location < 0)
1672 ? var->data.location
1673 : var->data.location - generic_base,
1674 var->name);
1675 return false;
1676 }
1677 } else if (target_index == MESA_SHADER_VERTEX) {
1678 unsigned binding;
1679
1680 if (prog->AttributeBindings->get(binding, var->name)) {
1681 assert(binding >= VERT_ATTRIB_GENERIC0);
1682 var->data.location = binding;
1683 var->data.is_unmatched_generic_inout = 0;
1684 }
1685 } else if (target_index == MESA_SHADER_FRAGMENT) {
1686 unsigned binding;
1687 unsigned index;
1688
1689 if (prog->FragDataBindings->get(binding, var->name)) {
1690 assert(binding >= FRAG_RESULT_DATA0);
1691 var->data.location = binding;
1692 var->data.is_unmatched_generic_inout = 0;
1693
1694 if (prog->FragDataIndexBindings->get(index, var->name)) {
1695 var->data.index = index;
1696 }
1697 }
1698 }
1699
1700 /* If the variable is not a built-in and has a location statically
1701 * assigned in the shader (presumably via a layout qualifier), make sure
1702 * that it doesn't collide with other assigned locations. Otherwise,
1703 * add it to the list of variables that need linker-assigned locations.
1704 */
1705 const unsigned slots = var->type->count_attribute_slots();
1706 if (var->data.location != -1) {
1707 if (var->data.location >= generic_base && var->data.index < 1) {
1708 /* From page 61 of the OpenGL 4.0 spec:
1709 *
1710 * "LinkProgram will fail if the attribute bindings assigned
1711 * by BindAttribLocation do not leave not enough space to
1712 * assign a location for an active matrix attribute or an
1713 * active attribute array, both of which require multiple
1714 * contiguous generic attributes."
1715 *
1716 * Previous versions of the spec contain similar language but omit
1717 * the bit about attribute arrays.
1718 *
1719 * Page 61 of the OpenGL 4.0 spec also says:
1720 *
1721 * "It is possible for an application to bind more than one
1722 * attribute name to the same location. This is referred to as
1723 * aliasing. This will only work if only one of the aliased
1724 * attributes is active in the executable program, or if no
1725 * path through the shader consumes more than one attribute of
1726 * a set of attributes aliased to the same location. A link
1727 * error can occur if the linker determines that every path
1728 * through the shader consumes multiple aliased attributes,
1729 * but implementations are not required to generate an error
1730 * in this case."
1731 *
1732 * These two paragraphs are either somewhat contradictory, or I
1733 * don't fully understand one or both of them.
1734 */
1735 /* FINISHME: The code as currently written does not support
1736 * FINISHME: attribute location aliasing (see comment above).
1737 */
1738 /* Mask representing the contiguous slots that will be used by
1739 * this attribute.
1740 */
1741 const unsigned attr = var->data.location - generic_base;
1742 const unsigned use_mask = (1 << slots) - 1;
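	    /* For example, a mat4 input occupies four slots, giving a use_mask
	     * of 0xf; shifting it by attr places those bits at the variable's
	     * bound location.
	     */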
1743
1744 /* Generate a link error if the set of bits requested for this
1745 * attribute overlaps any previously allocated bits.
1746 */
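	    /* Clearing the requested bits out of used_locations leaves it
	     * unchanged only when none of them were already set, so any
	     * difference here means two attributes overlap.
	     */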
1747 if ((~(use_mask << attr) & used_locations) != used_locations) {
1748 const char *const string = (target_index == MESA_SHADER_VERTEX)
1749 ? "vertex shader input" : "fragment shader output";
1750 linker_error(prog,
1751 "insufficient contiguous locations "
1752 "available for %s `%s' %d %d %d", string,
1753 var->name, used_locations, use_mask, attr);
1754 return false;
1755 }
1756
1757 used_locations |= (use_mask << attr);
1758 }
1759
1760 continue;
1761 }
1762
1763 to_assign[num_attr].slots = slots;
1764 to_assign[num_attr].var = var;
1765 num_attr++;
1766 }
1767
1768 /* If all of the attributes were assigned locations by the application (or
1769 * are built-in attributes with fixed locations), return early. This should
1770 * be the common case.
1771 */
1772 if (num_attr == 0)
1773 return true;
1774
1775 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
1776
1777 if (target_index == MESA_SHADER_VERTEX) {
1778 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
1779     * only be explicitly assigned via glBindAttribLocation. Mark it as
1780 * reserved to prevent it from being automatically allocated below.
1781 */
1782 find_deref_visitor find("gl_Vertex");
1783 find.run(sh->ir);
1784 if (find.variable_found())
1785 used_locations |= (1 << 0);
1786 }
1787
1788 for (unsigned i = 0; i < num_attr; i++) {
1789 /* Mask representing the contiguous slots that will be used by this
1790 * attribute.
1791 */
1792 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
1793
1794 int location = find_available_slots(used_locations, to_assign[i].slots);
1795
1796 if (location < 0) {
1797 const char *const string = (target_index == MESA_SHADER_VERTEX)
1798 ? "vertex shader input" : "fragment shader output";
1799
1800 linker_error(prog,
1801 "insufficient contiguous locations "
1802 "available for %s `%s'",
1803 string, to_assign[i].var->name);
1804 return false;
1805 }
1806
1807 to_assign[i].var->data.location = generic_base + location;
1808 to_assign[i].var->data.is_unmatched_generic_inout = 0;
1809 used_locations |= (use_mask << location);
1810 }
1811
1812 return true;
1813 }
1814
1815
1816 /**
1817 * Demote shader inputs and outputs that are not used in other stages
1818 */
1819 void
1820 demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode)
1821 {
1822 foreach_list(node, sh->ir) {
1823 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1824
1825 if ((var == NULL) || (var->data.mode != int(mode)))
1826 continue;
1827
1828       /* A shader 'in' or 'out' variable is only really an input or output if
1829        * its value is used by another shader stage; such variables have had
1830        * a location assigned.  Unmatched ones can be demoted to plain globals.
1831        */
1832 if (var->data.is_unmatched_generic_inout) {
1833 var->data.mode = ir_var_auto;
1834 }
1835 }
1836 }
1837
1838
1839 /**
1840 * Store the gl_FragDepth layout in the gl_shader_program struct.
1841 */
1842 static void
1843 store_fragdepth_layout(struct gl_shader_program *prog)
1844 {
1845 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
1846 return;
1847 }
1848
1849 struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
1850
1851 /* We don't look up the gl_FragDepth symbol directly because if
1852 * gl_FragDepth is not used in the shader, it's removed from the IR.
1853 * However, the symbol won't be removed from the symbol table.
1854 *
1855 * We're only interested in the cases where the variable is NOT removed
1856 * from the IR.
1857 */
1858 foreach_list(node, ir) {
1859 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1860
1861 if (var == NULL || var->data.mode != ir_var_shader_out) {
1862 continue;
1863 }
1864
1865 if (strcmp(var->name, "gl_FragDepth") == 0) {
1866 switch (var->data.depth_layout) {
1867 case ir_depth_layout_none:
1868 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
1869 return;
1870 case ir_depth_layout_any:
1871 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
1872 return;
1873 case ir_depth_layout_greater:
1874 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
1875 return;
1876 case ir_depth_layout_less:
1877 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
1878 return;
1879 case ir_depth_layout_unchanged:
1880 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
1881 return;
1882 default:
1883 assert(0);
1884 return;
1885 }
1886 }
1887 }
1888 }
1889
1890 /**
1891  * Validate the resources used by a program against the implementation limits.
1892 */
1893 static void
1894 check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
1895 {
1896 static const char *const shader_names[MESA_SHADER_TYPES] = {
1897 "vertex", "geometry", "fragment"
1898 };
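   /* These tables (and shader_names above) are indexed by shader stage and
    * must stay in MESA_SHADER_VERTEX, _GEOMETRY, _FRAGMENT order.
    */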
1899
1900 const unsigned max_samplers[MESA_SHADER_TYPES] = {
1901 ctx->Const.VertexProgram.MaxTextureImageUnits,
1902 ctx->Const.GeometryProgram.MaxTextureImageUnits,
1903 ctx->Const.FragmentProgram.MaxTextureImageUnits
1904 };
1905
1906 const unsigned max_default_uniform_components[MESA_SHADER_TYPES] = {
1907 ctx->Const.VertexProgram.MaxUniformComponents,
1908 ctx->Const.GeometryProgram.MaxUniformComponents,
1909 ctx->Const.FragmentProgram.MaxUniformComponents
1910 };
1911
1912 const unsigned max_combined_uniform_components[MESA_SHADER_TYPES] = {
1913 ctx->Const.VertexProgram.MaxCombinedUniformComponents,
1914 ctx->Const.GeometryProgram.MaxCombinedUniformComponents,
1915 ctx->Const.FragmentProgram.MaxCombinedUniformComponents
1916 };
1917
1918 const unsigned max_uniform_blocks[MESA_SHADER_TYPES] = {
1919 ctx->Const.VertexProgram.MaxUniformBlocks,
1920 ctx->Const.GeometryProgram.MaxUniformBlocks,
1921 ctx->Const.FragmentProgram.MaxUniformBlocks
1922 };
1923
1924 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1925 struct gl_shader *sh = prog->_LinkedShaders[i];
1926
1927 if (sh == NULL)
1928 continue;
1929
1930 if (sh->num_samplers > max_samplers[i]) {
1931 linker_error(prog, "Too many %s shader texture samplers",
1932 shader_names[i]);
1933 }
1934
1935 if (sh->num_uniform_components > max_default_uniform_components[i]) {
1936 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1937 linker_warning(prog, "Too many %s shader default uniform block "
1938 "components, but the driver will try to optimize "
1939 "them out; this is non-portable out-of-spec "
1940 "behavior\n",
1941 shader_names[i]);
1942 } else {
1943 linker_error(prog, "Too many %s shader default uniform block "
1944 "components",
1945 shader_names[i]);
1946 }
1947 }
1948
1949 if (sh->num_combined_uniform_components >
1950 max_combined_uniform_components[i]) {
1951 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1952 linker_warning(prog, "Too many %s shader uniform components, "
1953 "but the driver will try to optimize them out; "
1954 "this is non-portable out-of-spec behavior\n",
1955 shader_names[i]);
1956 } else {
1957 linker_error(prog, "Too many %s shader uniform components",
1958 shader_names[i]);
1959 }
1960 }
1961 }
1962
1963 unsigned blocks[MESA_SHADER_TYPES] = {0};
1964 unsigned total_uniform_blocks = 0;
1965
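   /* Count how many uniform blocks each stage references, and the combined
    * total across all stages, using the per-stage block index table.
    */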
1966   for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
1967      for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1968         if (prog->UniformBlockStageIndex[j][i] != -1) {
1969            blocks[j]++;
1970            total_uniform_blocks++;
1971         }
1972      }
1973   }
1974
1975   /* Check the limits once, after every block has been counted, so that
1976    * each violation is reported exactly once. */
1977   if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
1978      linker_error(prog, "Too many combined uniform blocks (%d/%d)",
1979                   total_uniform_blocks, ctx->Const.MaxCombinedUniformBlocks);
1980   } else {
1981      for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1982         if (blocks[i] > max_uniform_blocks[i]) {
1983            linker_error(prog, "Too many %s uniform blocks (%d/%d)",
1984                         shader_names[i], blocks[i],
1985                         max_uniform_blocks[i]);
1986            break;
1987         }
1988      }
1989   }
1990 }
1991
1992 void
1993 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
1994 {
1995 tfeedback_decl *tfeedback_decls = NULL;
1996 unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;
1997
1998 void *mem_ctx = ralloc_context(NULL); // temporary linker context
1999
2000 prog->LinkStatus = true; /* All error paths will set this to false */
2001 prog->Validated = false;
2002 prog->_Used = false;
2003
2004 ralloc_free(prog->InfoLog);
2005 prog->InfoLog = ralloc_strdup(NULL, "");
2006
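   /* Discard uniform block and atomic counter buffer data left over from any
    * previous link of this program; it is rebuilt from scratch below.
    */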
2007 ralloc_free(prog->UniformBlocks);
2008 prog->UniformBlocks = NULL;
2009 prog->NumUniformBlocks = 0;
2010 for (int i = 0; i < MESA_SHADER_TYPES; i++) {
2011 ralloc_free(prog->UniformBlockStageIndex[i]);
2012 prog->UniformBlockStageIndex[i] = NULL;
2013 }
2014
2015 ralloc_free(prog->AtomicBuffers);
2016 prog->AtomicBuffers = NULL;
2017 prog->NumAtomicBuffers = 0;
2018
2019 /* Separate the shaders into groups based on their type.
2020 */
2021 struct gl_shader **vert_shader_list;
2022 unsigned num_vert_shaders = 0;
2023 struct gl_shader **frag_shader_list;
2024 unsigned num_frag_shaders = 0;
2025 struct gl_shader **geom_shader_list;
2026 unsigned num_geom_shaders = 0;
2027
2028 vert_shader_list = (struct gl_shader **)
2029 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2030 frag_shader_list = (struct gl_shader **)
2031 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2032 geom_shader_list = (struct gl_shader **)
2033 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2034
2035 unsigned min_version = UINT_MAX;
2036 unsigned max_version = 0;
2037 const bool is_es_prog =
2038 (prog->NumShaders > 0 && prog->Shaders[0]->IsES) ? true : false;
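   /* Whether the program is ES is determined by its first shader; the loop
    * below then rejects any mix of ES and desktop shaders.
    */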
2039 for (unsigned i = 0; i < prog->NumShaders; i++) {
2040 min_version = MIN2(min_version, prog->Shaders[i]->Version);
2041 max_version = MAX2(max_version, prog->Shaders[i]->Version);
2042
2043 if (prog->Shaders[i]->IsES != is_es_prog) {
2044 linker_error(prog, "all shaders must use same shading "
2045 "language version\n");
2046 goto done;
2047 }
2048
2049 switch (prog->Shaders[i]->Type) {
2050 case GL_VERTEX_SHADER:
2051 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
2052 num_vert_shaders++;
2053 break;
2054 case GL_FRAGMENT_SHADER:
2055 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
2056 num_frag_shaders++;
2057 break;
2058 case GL_GEOMETRY_SHADER:
2059 geom_shader_list[num_geom_shaders] = prog->Shaders[i];
2060 num_geom_shaders++;
2061 break;
2062 }
2063 }
2064
2065 /* In desktop GLSL, different shader versions may be linked together. In
2066 * GLSL ES, all shader versions must be the same.
2067 */
2068 if (is_es_prog && min_version != max_version) {
2069 linker_error(prog, "all shaders must use same shading "
2070 "language version\n");
2071 goto done;
2072 }
2073
2074 prog->Version = max_version;
2075 prog->IsES = is_es_prog;
2076
2077 /* Geometry shaders have to be linked with vertex shaders.
2078 */
2079 if (num_geom_shaders > 0 && num_vert_shaders == 0) {
2080 linker_error(prog, "Geometry shader must be linked with "
2081 "vertex shader\n");
2082 goto done;
2083 }
2084
2085 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2086 if (prog->_LinkedShaders[i] != NULL)
2087 ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]);
2088
2089 prog->_LinkedShaders[i] = NULL;
2090 }
2091
2092 /* Link all shaders for a particular stage and validate the result.
2093 */
2094 if (num_vert_shaders > 0) {
2095 gl_shader *const sh =
2096 link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list,
2097 num_vert_shaders);
2098
2099 if (!prog->LinkStatus)
2100 goto done;
2101
2102 validate_vertex_shader_executable(prog, sh);
2103 if (!prog->LinkStatus)
2104 goto done;
2105 prog->LastClipDistanceArraySize = prog->Vert.ClipDistanceArraySize;
2106
2107 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX],
2108 sh);
2109 }
2110
2111 if (num_frag_shaders > 0) {
2112 gl_shader *const sh =
2113 link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list,
2114 num_frag_shaders);
2115
2116 if (!prog->LinkStatus)
2117 goto done;
2118
2119 validate_fragment_shader_executable(prog, sh);
2120 if (!prog->LinkStatus)
2121 goto done;
2122
2123 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
2124 sh);
2125 }
2126
2127 if (num_geom_shaders > 0) {
2128 gl_shader *const sh =
2129 link_intrastage_shaders(mem_ctx, ctx, prog, geom_shader_list,
2130 num_geom_shaders);
2131
2132 if (!prog->LinkStatus)
2133 goto done;
2134
2135 validate_geometry_shader_executable(prog, sh);
2136 if (!prog->LinkStatus)
2137 goto done;
2138 prog->LastClipDistanceArraySize = prog->Geom.ClipDistanceArraySize;
2139
2140 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_GEOMETRY],
2141 sh);
2142 }
2143
2144 /* Here begins the inter-stage linking phase. Some initial validation is
2145 * performed, then locations are assigned for uniforms, attributes, and
2146 * varyings.
2147 */
2148 cross_validate_uniforms(prog);
2149 if (!prog->LinkStatus)
2150 goto done;
2151
2152 unsigned prev;
2153
2154 for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
2155 if (prog->_LinkedShaders[prev] != NULL)
2156 break;
2157 }
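   /* prev is now the first stage that has a linked shader; it has no earlier
    * stage to validate against, so validation starts at the following stage.
    */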
2158
2159    /* Validate the inputs of each stage against the outputs of the
2160     * preceding stage.
2161 */
2162 for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
2163 if (prog->_LinkedShaders[i] == NULL)
2164 continue;
2165
2166 validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
2167 prog->_LinkedShaders[i]);
2168 if (!prog->LinkStatus)
2169 goto done;
2170
2171 cross_validate_outputs_to_inputs(prog,
2172 prog->_LinkedShaders[prev],
2173 prog->_LinkedShaders[i]);
2174 if (!prog->LinkStatus)
2175 goto done;
2176
2177 prev = i;
2178 }
2179
2180 /* Cross-validate uniform blocks between shader stages */
2181 validate_interstage_uniform_blocks(prog, prog->_LinkedShaders,
2182 MESA_SHADER_TYPES);
2183 if (!prog->LinkStatus)
2184 goto done;
2185
2186 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2187 if (prog->_LinkedShaders[i] != NULL)
2188 lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
2189 }
2190
2191    /* Implement the GLSL 1.30+ rule for discard vs infinite loops.  Do
2192 * it before optimization because we want most of the checks to get
2193 * dropped thanks to constant propagation.
2194 *
2195 * This rule also applies to GLSL ES 3.00.
2196 */
2197 if (max_version >= (is_es_prog ? 300 : 130)) {
2198 struct gl_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
2199 if (sh) {
2200 lower_discard_flow(sh->ir);
2201 }
2202 }
2203
2204 if (!interstage_cross_validate_uniform_blocks(prog))
2205 goto done;
2206
2207    /* Run the common optimizations before assigning storage for attributes,
2208     * uniforms, and varyings; otherwise later optimization passes could leave
2209     * some of that storage allocated but unused.
2210 */
2211 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
2212 if (prog->_LinkedShaders[i] == NULL)
2213 continue;
2214
2215 detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
2216 if (!prog->LinkStatus)
2217 goto done;
2218
2219 if (ctx->ShaderCompilerOptions[i].LowerClipDistance) {
2220 lower_clip_distance(prog->_LinkedShaders[i]);
2221 }
2222
2223 unsigned max_unroll = ctx->ShaderCompilerOptions[i].MaxUnrollIterations;
2224
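      /* do_common_optimization() reports whether it made progress, so keep
       * running it until the IR reaches a fixed point.
       */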
2225       while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false,
                                         max_unroll, &ctx->ShaderCompilerOptions[i]))
2226 ;
2227 }
2228
2229 /* Mark all generic shader inputs and outputs as unpaired. */
2230 if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
2231 link_invalidate_variable_locations(
2232 prog->_LinkedShaders[MESA_SHADER_VERTEX]->ir);
2233 }
2234 if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
2235 link_invalidate_variable_locations(
2236 prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir);
2237 }
2238 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
2239 link_invalidate_variable_locations(
2240 prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir);
2241 }
2242
2243 /* FINISHME: The value of the max_attribute_index parameter is
2244 * FINISHME: implementation dependent based on the value of
2245 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
2246 * FINISHME: at least 16, so hardcode 16 for now.
2247 */
2248 if (!assign_attribute_or_color_locations(prog, MESA_SHADER_VERTEX, 16)) {
2249 goto done;
2250 }
2251
2252    if (!assign_attribute_or_color_locations(prog, MESA_SHADER_FRAGMENT,
               MAX2(ctx->Const.MaxDrawBuffers, ctx->Const.MaxDualSourceDrawBuffers))) {
2253 goto done;
2254 }
2255
2256 unsigned first;
2257 for (first = 0; first < MESA_SHADER_TYPES; first++) {
2258 if (prog->_LinkedShaders[first] != NULL)
2259 break;
2260 }
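   /* first is the earliest active stage.  It is used below both to reject
    * transform feedback on fragment-only programs and to handle the
    * fragment-only case when assigning varyings.
    */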
2261
2262 if (num_tfeedback_decls != 0) {
2263 /* From GL_EXT_transform_feedback:
2264 * A program will fail to link if:
2265 *
2266 * * the <count> specified by TransformFeedbackVaryingsEXT is
2267 * non-zero, but the program object has no vertex or geometry
2268 * shader;
2269 */
2270 if (first == MESA_SHADER_FRAGMENT) {
2271 linker_error(prog, "Transform feedback varyings specified, but "
2272 "no vertex or geometry shader is present.");
2273 goto done;
2274 }
2275
2276 tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
2277 prog->TransformFeedback.NumVarying);
2278 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2279 prog->TransformFeedback.VaryingNames,
2280 tfeedback_decls))
2281 goto done;
2282 }
2283
2284 /* Linking the stages in the opposite order (from fragment to vertex)
2285 * ensures that inter-shader outputs written to in an earlier stage are
2286 * eliminated if they are (transitively) not used in a later stage.
2287 */
2288 int last, next;
2289 for (last = MESA_SHADER_TYPES-1; last >= 0; last--) {
2290 if (prog->_LinkedShaders[last] != NULL)
2291 break;
2292 }
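   /* last is the final active stage.  If it is not the fragment shader, its
    * outputs feed nothing but transform feedback, so they are assigned
    * locations only for that purpose and everything unused is demoted and
    * dead-code eliminated below.
    */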
2293
2294 if (last >= 0 && last < MESA_SHADER_FRAGMENT) {
2295 gl_shader *const sh = prog->_LinkedShaders[last];
2296
2297 if (num_tfeedback_decls != 0) {
2298 /* There was no fragment shader, but we still have to assign varying
2299 * locations for use by transform feedback.
2300 */
2301 if (!assign_varying_locations(ctx, mem_ctx, prog,
2302 sh, NULL,
2303 num_tfeedback_decls, tfeedback_decls,
2304 0))
2305 goto done;
2306 }
2307
2308 do_dead_builtin_varyings(ctx, sh, NULL,
2309 num_tfeedback_decls, tfeedback_decls);
2310
2311 demote_shader_inputs_and_outputs(sh, ir_var_shader_out);
2312
2313 /* Eliminate code that is now dead due to unused outputs being demoted.
2314 */
2315 while (do_dead_code(sh->ir, false))
2316 ;
2317 }
2318 else if (first == MESA_SHADER_FRAGMENT) {
2319 /* If the program only contains a fragment shader...
2320 */
2321 gl_shader *const sh = prog->_LinkedShaders[first];
2322
2323 do_dead_builtin_varyings(ctx, NULL, sh,
2324 num_tfeedback_decls, tfeedback_decls);
2325
2326 demote_shader_inputs_and_outputs(sh, ir_var_shader_in);
2327
2328 while (do_dead_code(sh->ir, false))
2329 ;
2330 }
2331
2332 next = last;
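   /* Walk the pipeline from back to front, pairing each active stage (sh_i)
    * with the next active stage (sh_next) that consumes its outputs.
    */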
2333 for (int i = next - 1; i >= 0; i--) {
2334 if (prog->_LinkedShaders[i] == NULL)
2335 continue;
2336
2337 gl_shader *const sh_i = prog->_LinkedShaders[i];
2338 gl_shader *const sh_next = prog->_LinkedShaders[next];
2339 unsigned gs_input_vertices =
2340 next == MESA_SHADER_GEOMETRY ? prog->Geom.VerticesIn : 0;
2341
2342 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2343 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2344 tfeedback_decls, gs_input_vertices))
2345 goto done;
2346
2347 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2348 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2349 tfeedback_decls);
2350
2351 demote_shader_inputs_and_outputs(sh_i, ir_var_shader_out);
2352 demote_shader_inputs_and_outputs(sh_next, ir_var_shader_in);
2353
2354 /* Eliminate code that is now dead due to unused outputs being demoted.
2355 */
2356 while (do_dead_code(sh_i->ir, false))
2357 ;
2358 while (do_dead_code(sh_next->ir, false))
2359 ;
2360
2361 /* This must be done after all dead varyings are eliminated. */
2362 if (!check_against_output_limit(ctx, prog, sh_i))
2363 goto done;
2364 if (!check_against_input_limit(ctx, prog, sh_next))
2365 goto done;
2366
2367 next = i;
2368 }
2369
2370 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
2371 goto done;
2372
2373 update_array_sizes(prog);
2374 link_assign_uniform_locations(prog);
2375 link_assign_atomic_counter_resources(ctx, prog);
2376 store_fragdepth_layout(prog);
2377
2378 check_resources(ctx, prog);
2379 link_check_atomic_counter_resources(ctx, prog);
2380
2381 if (!prog->LinkStatus)
2382 goto done;
2383
2384 /* OpenGL ES requires that a vertex shader and a fragment shader both be
2385 * present in a linked program. By checking prog->IsES, we also
2386 * catch the GL_ARB_ES2_compatibility case.
2387 */
2388 if (!prog->InternalSeparateShader &&
2389 (ctx->API == API_OPENGLES2 || prog->IsES)) {
2390 if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
2391 linker_error(prog, "program lacks a vertex shader\n");
2392 } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
2393 linker_error(prog, "program lacks a fragment shader\n");
2394 }
2395 }
2396
2397 /* FINISHME: Assign fragment shader output locations. */
2398
2399 done:
2400 free(vert_shader_list);
2401 free(frag_shader_list);
2402 free(geom_shader_list);
2403
2404 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
2405 if (prog->_LinkedShaders[i] == NULL)
2406 continue;
2407
2408 /* Do a final validation step to make sure that the IR wasn't
2409 * invalidated by any modifications performed after intrastage linking.
2410 */
2411 validate_ir_tree(prog->_LinkedShaders[i]->ir);
2412
2413 /* Retain any live IR, but trash the rest. */
2414 reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
2415
2416 /* The symbol table in the linked shaders may contain references to
2417 * variables that were removed (e.g., unused uniforms). Since it may
2418 * contain junk, there is no possible valid use. Delete it and set the
2419 * pointer to NULL.
2420 */
2421 delete prog->_LinkedShaders[i]->symbols;
2422 prog->_LinkedShaders[i]->symbols = NULL;
2423 }
2424
2425 ralloc_free(mem_ctx);
2426 }