1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35  *   - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54  * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66
67 #include "main/core.h"
68 #include "glsl_symbol_table.h"
69 #include "glsl_parser_extras.h"
70 #include "ir.h"
71 #include "program.h"
72 #include "program/hash_table.h"
73 #include "linker.h"
74 #include "link_varyings.h"
75 #include "ir_optimization.h"
76 #include "ir_rvalue_visitor.h"
77
78 extern "C" {
79 #include "main/shaderobj.h"
80 #include "main/enums.h"
81 }
82
83 void linker_error(gl_shader_program *, const char *, ...);
84
85 namespace {
86
87 /**
88 * Visitor that determines whether or not a variable is ever written.
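 *
 * A minimal usage sketch (mirroring the gl_Position check in
 * validate_vertex_shader_executable() later in this file):
 *
 *    find_assignment_visitor find("gl_Position");
 *    find.run(shader->ir);
 *    if (!find.variable_found())
 *       linker_error(prog, "vertex shader does not write to `gl_Position'\n");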
89 */
90 class find_assignment_visitor : public ir_hierarchical_visitor {
91 public:
92 find_assignment_visitor(const char *name)
93 : name(name), found(false)
94 {
95 /* empty */
96 }
97
98 virtual ir_visitor_status visit_enter(ir_assignment *ir)
99 {
100 ir_variable *const var = ir->lhs->variable_referenced();
101
102 if (strcmp(name, var->name) == 0) {
103 found = true;
104 return visit_stop;
105 }
106
107 return visit_continue_with_parent;
108 }
109
110 virtual ir_visitor_status visit_enter(ir_call *ir)
111 {
112 exec_list_iterator sig_iter = ir->callee->parameters.iterator();
113 foreach_iter(exec_list_iterator, iter, *ir) {
114 ir_rvalue *param_rval = (ir_rvalue *)iter.get();
115 ir_variable *sig_param = (ir_variable *)sig_iter.get();
116
117 if (sig_param->mode == ir_var_function_out ||
118 sig_param->mode == ir_var_function_inout) {
119 ir_variable *var = param_rval->variable_referenced();
120 if (var && strcmp(name, var->name) == 0) {
121 found = true;
122 return visit_stop;
123 }
124 }
125 sig_iter.next();
126 }
127
128 if (ir->return_deref != NULL) {
129 ir_variable *const var = ir->return_deref->variable_referenced();
130
131 if (strcmp(name, var->name) == 0) {
132 found = true;
133 return visit_stop;
134 }
135 }
136
137 return visit_continue_with_parent;
138 }
139
140 bool variable_found()
141 {
142 return found;
143 }
144
145 private:
146 const char *name; /**< Find writes to a variable with this name. */
147 bool found; /**< Was a write to the variable found? */
148 };
149
150
151 /**
152 * Visitor that determines whether or not a variable is ever read.
153 */
154 class find_deref_visitor : public ir_hierarchical_visitor {
155 public:
156 find_deref_visitor(const char *name)
157 : name(name), found(false)
158 {
159 /* empty */
160 }
161
162 virtual ir_visitor_status visit(ir_dereference_variable *ir)
163 {
164 if (strcmp(this->name, ir->var->name) == 0) {
165 this->found = true;
166 return visit_stop;
167 }
168
169 return visit_continue;
170 }
171
172 bool variable_found() const
173 {
174 return this->found;
175 }
176
177 private:
178    const char *name;       /**< Find reads of a variable with this name. */
179    bool found;             /**< Was a read of the variable found? */
180 };
181
182
183 class geom_array_resize_visitor : public ir_hierarchical_visitor {
184 public:
185 unsigned num_vertices;
186 gl_shader_program *prog;
187
188 geom_array_resize_visitor(unsigned num_vertices, gl_shader_program *prog)
189 {
190 this->num_vertices = num_vertices;
191 this->prog = prog;
192 }
193
194 virtual ~geom_array_resize_visitor()
195 {
196 /* empty */
197 }
198
199 virtual ir_visitor_status visit(ir_variable *var)
200 {
201 if (!var->type->is_array() || var->mode != ir_var_shader_in)
202 return visit_continue;
203
204 unsigned size = var->type->length;
205
206 /* Generate a link error if the shader has declared this array with an
207 * incorrect size.
208 */
209 if (size && size != this->num_vertices) {
210 linker_error(this->prog, "size of array %s declared as %u, "
211 "but number of input vertices is %u\n",
212 var->name, size, this->num_vertices);
213 return visit_continue;
214 }
215
216 /* Generate a link error if the shader attempts to access an input
217 * array using an index too large for its actual size assigned at link
218 * time.
219 */
220 if (var->max_array_access >= this->num_vertices) {
221 linker_error(this->prog, "geometry shader accesses element %i of "
222 "%s, but only %i input vertices\n",
223 var->max_array_access, var->name, this->num_vertices);
224 return visit_continue;
225 }
226
227 var->type = glsl_type::get_array_instance(var->type->element_type(),
228 this->num_vertices);
229 var->max_array_access = this->num_vertices - 1;
230
231 return visit_continue;
232 }
233
234 /* Dereferences of input variables need to be updated so that their type
235 * matches the newly assigned type of the variable they are accessing. */
236 virtual ir_visitor_status visit(ir_dereference_variable *ir)
237 {
238 ir->type = ir->var->type;
239 return visit_continue;
240 }
241
242 /* Dereferences of 2D input arrays need to be updated so that their type
243 * matches the newly assigned type of the array they are accessing. */
244 virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
245 {
246 const glsl_type *const vt = ir->array->type;
247 if (vt->is_array())
248 ir->type = vt->element_type();
249 return visit_continue;
250 }
251 };
252
253
254 /**
255 * Visitor that determines whether or not a shader uses ir_end_primitive.
256 */
257 class find_end_primitive_visitor : public ir_hierarchical_visitor {
258 public:
259 find_end_primitive_visitor()
260 : found(false)
261 {
262 /* empty */
263 }
264
265 virtual ir_visitor_status visit(ir_end_primitive *)
266 {
267 found = true;
268 return visit_stop;
269 }
270
271 bool end_primitive_found()
272 {
273 return found;
274 }
275
276 private:
277 bool found;
278 };
279
280 } /* anonymous namespace */
281
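/**
 * Append a formatted error message to the program's info log and mark the
 * link as failed by clearing prog->LinkStatus.
 */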
282 void
283 linker_error(gl_shader_program *prog, const char *fmt, ...)
284 {
285 va_list ap;
286
287 ralloc_strcat(&prog->InfoLog, "error: ");
288 va_start(ap, fmt);
289 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
290 va_end(ap);
291
292 prog->LinkStatus = false;
293 }
294
295
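/**
 * Append a formatted warning message to the program's info log.
 *
 * Unlike linker_error(), this does not modify prog->LinkStatus.
 */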
296 void
297 linker_warning(gl_shader_program *prog, const char *fmt, ...)
298 {
299 va_list ap;
300
301    ralloc_strcat(&prog->InfoLog, "warning: ");
302 va_start(ap, fmt);
303 ralloc_vasprintf_append(&prog->InfoLog, fmt, ap);
304 va_end(ap);
305
306 }
307
308
309 /**
310 * Given a string identifying a program resource, break it into a base name
311 * and an optional array index in square brackets.
312 *
313 * If an array index is present, \c out_base_name_end is set to point to the
314 * "[" that precedes the array index, and the array index itself is returned
315 * as a long.
316 *
317 * If no array index is present (or if the array index is negative or
318 * mal-formed), \c out_base_name_end, is set to point to the null terminator
319 * at the end of the input string, and -1 is returned.
320 *
321 * Only the final array index is parsed; if the string contains other array
322 * indices (or structure field accesses), they are left in the base name.
323 *
324 * No attempt is made to check that the base name is properly formed;
325 * typically the caller will look up the base name in a hash table, so
326 * ill-formed base names simply turn into hash table lookup failures.
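 *
 * For example (a hypothetical resource name, for illustration only):
 *
 *    const GLchar *end;
 *    long idx = parse_program_resource_name("palette[12]", &end);
 *
 * yields idx == 12 with end pointing at the '[', while a name with no
 * trailing array index, such as "palette", yields -1 with end pointing at
 * the terminating NUL.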
327 */
328 long
329 parse_program_resource_name(const GLchar *name,
330 const GLchar **out_base_name_end)
331 {
332 /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
333 *
334 * "When an integer array element or block instance number is part of
335 * the name string, it will be specified in decimal form without a "+"
336 * or "-" sign or any extra leading zeroes. Additionally, the name
337 * string will not include white space anywhere in the string."
338 */
339
340 const size_t len = strlen(name);
341 *out_base_name_end = name + len;
342
343 if (len == 0 || name[len-1] != ']')
344 return -1;
345
346 /* Walk backwards over the string looking for a non-digit character. This
347 * had better be the opening bracket for an array index.
348 *
349 * Initially, i specifies the location of the ']'. Since the string may
350  * contain only the ']' character, walk backwards very carefully.
351 */
352 unsigned i;
353 for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
354 /* empty */ ;
355
356 if ((i == 0) || name[i-1] != '[')
357 return -1;
358
359 long array_index = strtol(&name[i], NULL, 10);
360 if (array_index < 0)
361 return -1;
362
363 *out_base_name_end = name + (i - 1);
364 return array_index;
365 }
366
367
368 void
369 link_invalidate_variable_locations(exec_list *ir)
370 {
371 foreach_list(node, ir) {
372 ir_variable *const var = ((ir_instruction *) node)->as_variable();
373
374 if (var == NULL)
375 continue;
376
377 /* Only assign locations for variables that lack an explicit location.
378 * Explicit locations are set for all built-in variables, generic vertex
379 * shader inputs (via layout(location=...)), and generic fragment shader
380 * outputs (also via layout(location=...)).
381 */
382 if (!var->explicit_location) {
383 var->location = -1;
384 var->location_frac = 0;
385 }
386
387 /* ir_variable::is_unmatched_generic_inout is used by the linker while
388 * connecting outputs from one stage to inputs of the next stage.
389 *
390 * There are two implicit assumptions here. First, we assume that any
391 * built-in variable (i.e., non-generic in or out) will have
392 * explicit_location set. Second, we assume that any generic in or out
393 * will not have explicit_location set.
394 *
395 * This second assumption will only be valid until
396 * GL_ARB_separate_shader_objects is supported. When that extension is
397 * implemented, this function will need some modifications.
398 */
399 if (!var->explicit_location) {
400 var->is_unmatched_generic_inout = 1;
401 } else {
402 var->is_unmatched_generic_inout = 0;
403 }
404 }
405 }
406
407
408 /**
409 * Set UsesClipDistance and ClipDistanceArraySize based on the given shader.
410 *
411 * Also check for errors based on incorrect usage of gl_ClipVertex and
412 * gl_ClipDistance.
413 *
414  * Any error found is reported via linker_error().
415 */
416 static void
417 analyze_clip_usage(const char *shader_type, struct gl_shader_program *prog,
418 struct gl_shader *shader, GLboolean *UsesClipDistance,
419 GLuint *ClipDistanceArraySize)
420 {
421 *ClipDistanceArraySize = 0;
422
423 if (!prog->IsES && prog->Version >= 130) {
424 /* From section 7.1 (Vertex Shader Special Variables) of the
425 * GLSL 1.30 spec:
426 *
427 * "It is an error for a shader to statically write both
428 * gl_ClipVertex and gl_ClipDistance."
429 *
430 * This does not apply to GLSL ES shaders, since GLSL ES defines neither
431 * gl_ClipVertex nor gl_ClipDistance.
432 */
433 find_assignment_visitor clip_vertex("gl_ClipVertex");
434 find_assignment_visitor clip_distance("gl_ClipDistance");
435
436 clip_vertex.run(shader->ir);
437 clip_distance.run(shader->ir);
438 if (clip_vertex.variable_found() && clip_distance.variable_found()) {
439 linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
440 "and `gl_ClipDistance'\n", shader_type);
441 return;
442 }
443 *UsesClipDistance = clip_distance.variable_found();
444 ir_variable *clip_distance_var =
445 shader->symbols->get_variable("gl_ClipDistance");
446 if (clip_distance_var)
447 *ClipDistanceArraySize = clip_distance_var->type->length;
448 } else {
449 *UsesClipDistance = false;
450 }
451 }
452
453
454 /**
455 * Verify that a vertex shader executable meets all semantic requirements.
456 *
457 * Also sets prog->Vert.UsesClipDistance and prog->Vert.ClipDistanceArraySize
458 * as a side effect.
459 *
460 * \param shader Vertex shader executable to be verified
461 */
462 void
463 validate_vertex_shader_executable(struct gl_shader_program *prog,
464 struct gl_shader *shader)
465 {
466 if (shader == NULL)
467 return;
468
469 /* From the GLSL 1.10 spec, page 48:
470 *
471 * "The variable gl_Position is available only in the vertex
472 * language and is intended for writing the homogeneous vertex
473 * position. All executions of a well-formed vertex shader
474  *      executable must write a value into this variable."
479 *
480 * while in GLSL 1.40 this text is changed to:
481 *
482 * "The variable gl_Position is available only in the vertex
483 * language and is intended for writing the homogeneous vertex
484 * position. It can be written at any time during shader
485 * execution. It may also be read back by a vertex shader
486 * after being written. This value will be used by primitive
487 * assembly, clipping, culling, and other fixed functionality
488 * operations, if present, that operate on primitives after
489 * vertex processing has occurred. Its value is undefined if
490 * the vertex shader executable does not write gl_Position."
491 *
492 * GLSL ES 3.00 is similar to GLSL 1.40--failing to write to gl_Position is
493 * not an error.
494 */
495 if (prog->Version < (prog->IsES ? 300 : 140)) {
496 find_assignment_visitor find("gl_Position");
497 find.run(shader->ir);
498 if (!find.variable_found()) {
499 linker_error(prog, "vertex shader does not write to `gl_Position'\n");
500 return;
501 }
502 }
503
504 analyze_clip_usage("vertex", prog, shader, &prog->Vert.UsesClipDistance,
505 &prog->Vert.ClipDistanceArraySize);
506 }
507
508
509 /**
510 * Verify that a fragment shader executable meets all semantic requirements
511 *
512 * \param shader Fragment shader executable to be verified
513 */
514 void
515 validate_fragment_shader_executable(struct gl_shader_program *prog,
516 struct gl_shader *shader)
517 {
518 if (shader == NULL)
519 return;
520
521 find_assignment_visitor frag_color("gl_FragColor");
522 find_assignment_visitor frag_data("gl_FragData");
523
524 frag_color.run(shader->ir);
525 frag_data.run(shader->ir);
526
527 if (frag_color.variable_found() && frag_data.variable_found()) {
528 linker_error(prog, "fragment shader writes to both "
529 "`gl_FragColor' and `gl_FragData'\n");
530 }
531 }
532
533 /**
534 * Verify that a geometry shader executable meets all semantic requirements
535 *
536 * Also sets prog->Geom.VerticesIn, prog->Geom.UsesClipDistance, and
537 * prog->Geom.ClipDistanceArraySize as a side effect.
538 *
539 * \param shader Geometry shader executable to be verified
540 */
541 void
542 validate_geometry_shader_executable(struct gl_shader_program *prog,
543 struct gl_shader *shader)
544 {
545 if (shader == NULL)
546 return;
547
548 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
549 prog->Geom.VerticesIn = num_vertices;
550
551 analyze_clip_usage("geometry", prog, shader, &prog->Geom.UsesClipDistance,
552 &prog->Geom.ClipDistanceArraySize);
553
554 find_end_primitive_visitor end_primitive;
555 end_primitive.run(shader->ir);
556 prog->Geom.UsesEndPrimitive = end_primitive.end_primitive_found();
557 }
558
559
560 /**
561 * Perform validation of global variables used across multiple shaders
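 *
 * For example (hypothetical shaders, for illustration only): if one shader
 * declares `uniform vec4 color;' and another declares `uniform vec3 color;',
 * the type check below reports a link error.  The one exception is a pair of
 * arrays with the same element type where one declaration is implicitly
 * sized; those are treated as matching, and the explicitly sized type wins.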
562 */
563 void
564 cross_validate_globals(struct gl_shader_program *prog,
565 struct gl_shader **shader_list,
566 unsigned num_shaders,
567 bool uniforms_only)
568 {
569 /* Examine all of the uniforms in all of the shaders and cross validate
570 * them.
571 */
572 glsl_symbol_table variables;
573 for (unsigned i = 0; i < num_shaders; i++) {
574 if (shader_list[i] == NULL)
575 continue;
576
577 foreach_list(node, shader_list[i]->ir) {
578 ir_variable *const var = ((ir_instruction *) node)->as_variable();
579
580 if (var == NULL)
581 continue;
582
583 if (uniforms_only && (var->mode != ir_var_uniform))
584 continue;
585
586 /* Don't cross validate temporaries that are at global scope. These
587          * will eventually get pulled into the shader's 'main'.
588 */
589 if (var->mode == ir_var_temporary)
590 continue;
591
592 /* If a global with this name has already been seen, verify that the
593 * new instance has the same type. In addition, if the globals have
594 * initializers, the values of the initializers must be the same.
595 */
596 ir_variable *const existing = variables.get_variable(var->name);
597 if (existing != NULL) {
598 if (var->type != existing->type) {
599 /* Consider the types to be "the same" if both types are arrays
600 * of the same type and one of the arrays is implicitly sized.
601 * In addition, set the type of the linked variable to the
602 * explicitly sized array.
603 */
604 if (var->type->is_array()
605 && existing->type->is_array()
606 && (var->type->fields.array == existing->type->fields.array)
607 && ((var->type->length == 0)
608 || (existing->type->length == 0))) {
609 if (var->type->length != 0) {
610 existing->type = var->type;
611 }
612 } else {
613 linker_error(prog, "%s `%s' declared as type "
614 "`%s' and type `%s'\n",
615 mode_string(var),
616 var->name, var->type->name,
617 existing->type->name);
618 return;
619 }
620 }
621
622 if (var->explicit_location) {
623 if (existing->explicit_location
624 && (var->location != existing->location)) {
625 linker_error(prog, "explicit locations for %s "
626 "`%s' have differing values\n",
627 mode_string(var), var->name);
628 return;
629 }
630
631 existing->location = var->location;
632 existing->explicit_location = true;
633 }
634
635 /* From the GLSL 4.20 specification:
636 * "A link error will result if two compilation units in a program
637 * specify different integer-constant bindings for the same
638 * opaque-uniform name. However, it is not an error to specify a
639 * binding on some but not all declarations for the same name"
640 */
641 if (var->explicit_binding) {
642 if (existing->explicit_binding &&
643 var->binding != existing->binding) {
644 linker_error(prog, "explicit bindings for %s "
645 "`%s' have differing values\n",
646 mode_string(var), var->name);
647 return;
648 }
649
650 existing->binding = var->binding;
651 existing->explicit_binding = true;
652 }
653
654 if (var->type->contains_atomic() &&
655 var->atomic.offset != existing->atomic.offset) {
656 linker_error(prog, "offset specifications for %s "
657 "`%s' have differing values\n",
658 mode_string(var), var->name);
659 return;
660 }
661
662 /* Validate layout qualifiers for gl_FragDepth.
663 *
664 * From the AMD/ARB_conservative_depth specs:
665 *
666 * "If gl_FragDepth is redeclared in any fragment shader in a
667 * program, it must be redeclared in all fragment shaders in
668 * that program that have static assignments to
669 * gl_FragDepth. All redeclarations of gl_FragDepth in all
670 * fragment shaders in a single program must have the same set
671 * of qualifiers."
672 */
673 if (strcmp(var->name, "gl_FragDepth") == 0) {
674 bool layout_declared = var->depth_layout != ir_depth_layout_none;
675 bool layout_differs =
676 var->depth_layout != existing->depth_layout;
677
678 if (layout_declared && layout_differs) {
679 linker_error(prog,
680 "All redeclarations of gl_FragDepth in all "
681 "fragment shaders in a single program must have "
682 "the same set of qualifiers.");
683 }
684
685 if (var->used && layout_differs) {
686 linker_error(prog,
687 "If gl_FragDepth is redeclared with a layout "
688 "qualifier in any fragment shader, it must be "
689 "redeclared with the same layout qualifier in "
690 "all fragment shaders that have assignments to "
691 "gl_FragDepth");
692 }
693 }
694
695 /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
696 *
697 * "If a shared global has multiple initializers, the
698 * initializers must all be constant expressions, and they
699 * must all have the same value. Otherwise, a link error will
700 * result. (A shared global having only one initializer does
701 * not require that initializer to be a constant expression.)"
702 *
703 * Previous to 4.20 the GLSL spec simply said that initializers
704 * must have the same value. In this case of non-constant
705 * initializers, this was impossible to determine. As a result,
706 * no vendor actually implemented that behavior. The 4.20
707 * behavior matches the implemented behavior of at least one other
708 * vendor, so we'll implement that for all GLSL versions.
709 */
710 if (var->constant_initializer != NULL) {
711 if (existing->constant_initializer != NULL) {
712 if (!var->constant_initializer->has_value(existing->constant_initializer)) {
713 linker_error(prog, "initializers for %s "
714 "`%s' have differing values\n",
715 mode_string(var), var->name);
716 return;
717 }
718 } else {
719 /* If the first-seen instance of a particular uniform did not
720 * have an initializer but a later instance does, copy the
721 * initializer to the version stored in the symbol table.
722 */
723 /* FINISHME: This is wrong. The constant_value field should
724 * FINISHME: not be modified! Imagine a case where a shader
725 * FINISHME: without an initializer is linked in two different
726 * FINISHME: programs with shaders that have differing
727 * FINISHME: initializers. Linking with the first will
728 * FINISHME: modify the shader, and linking with the second
729 * FINISHME: will fail.
730 */
731 existing->constant_initializer =
732 var->constant_initializer->clone(ralloc_parent(existing),
733 NULL);
734 }
735 }
736
737 if (var->has_initializer) {
738 if (existing->has_initializer
739 && (var->constant_initializer == NULL
740 || existing->constant_initializer == NULL)) {
741 linker_error(prog,
742 "shared global variable `%s' has multiple "
743 "non-constant initializers.\n",
744 var->name);
745 return;
746 }
747
748 /* Some instance had an initializer, so keep track of that. In
749             * this location, any kind of initializer (constant or
750             * otherwise) propagates its existence to the variable
751 * stored in the symbol table.
752 */
753 existing->has_initializer = true;
754 }
755
756 if (existing->invariant != var->invariant) {
757 linker_error(prog, "declarations for %s `%s' have "
758 "mismatching invariant qualifiers\n",
759 mode_string(var), var->name);
760 return;
761 }
762 if (existing->centroid != var->centroid) {
763 linker_error(prog, "declarations for %s `%s' have "
764 "mismatching centroid qualifiers\n",
765 mode_string(var), var->name);
766 return;
767 }
768 } else
769 variables.add_variable(var);
770 }
771 }
772 }
773
774
775 /**
776 * Perform validation of uniforms used across multiple shader stages
777 */
778 void
779 cross_validate_uniforms(struct gl_shader_program *prog)
780 {
781 cross_validate_globals(prog, prog->_LinkedShaders,
782 MESA_SHADER_TYPES, true);
783 }
784
785 /**
786 * Accumulates the array of prog->UniformBlocks and checks that all
787  * definitions of blocks agree on their contents.
788 */
789 static bool
790 interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog)
791 {
792 unsigned max_num_uniform_blocks = 0;
793 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
794 if (prog->_LinkedShaders[i])
795 max_num_uniform_blocks += prog->_LinkedShaders[i]->NumUniformBlocks;
796 }
797
798 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
799 struct gl_shader *sh = prog->_LinkedShaders[i];
800
801 prog->UniformBlockStageIndex[i] = ralloc_array(prog, int,
802 max_num_uniform_blocks);
803 for (unsigned int j = 0; j < max_num_uniform_blocks; j++)
804 prog->UniformBlockStageIndex[i][j] = -1;
805
806 if (sh == NULL)
807 continue;
808
809 for (unsigned int j = 0; j < sh->NumUniformBlocks; j++) {
810 int index = link_cross_validate_uniform_block(prog,
811 &prog->UniformBlocks,
812 &prog->NumUniformBlocks,
813 &sh->UniformBlocks[j]);
814
815 if (index == -1) {
816 linker_error(prog, "uniform block `%s' has mismatching definitions",
817 sh->UniformBlocks[j].Name);
818 return false;
819 }
820
821 prog->UniformBlockStageIndex[i][index] = j;
822 }
823 }
824
825 return true;
826 }
827
828
829 /**
830 * Populates a shaders symbol table with all global declarations
831 */
832 static void
833 populate_symbol_table(gl_shader *sh)
834 {
835 sh->symbols = new(sh) glsl_symbol_table;
836
837 foreach_list(node, sh->ir) {
838 ir_instruction *const inst = (ir_instruction *) node;
839 ir_variable *var;
840 ir_function *func;
841
842 if ((func = inst->as_function()) != NULL) {
843 sh->symbols->add_function(func);
844 } else if ((var = inst->as_variable()) != NULL) {
845 sh->symbols->add_variable(var);
846 }
847 }
848 }
849
850
851 /**
852 * Remap variables referenced in an instruction tree
853 *
854 * This is used when instruction trees are cloned from one shader and placed in
855 * another. These trees will contain references to \c ir_variable nodes that
856 * do not exist in the target shader. This function finds these \c ir_variable
857 * references and replaces the references with matching variables in the target
858 * shader.
859 *
860 * If there is no matching variable in the target shader, a clone of the
861 * \c ir_variable is made and added to the target shader. The new variable is
862 * added to \b both the instruction stream and the symbol table.
863 *
864 * \param inst IR tree that is to be processed.
865 * \param symbols Symbol table containing global scope symbols in the
866 * linked shader.
867 * \param instructions Instruction stream where new variable declarations
868 * should be added.
869 */
870 void
871 remap_variables(ir_instruction *inst, struct gl_shader *target,
872 hash_table *temps)
873 {
874 class remap_visitor : public ir_hierarchical_visitor {
875 public:
876 remap_visitor(struct gl_shader *target,
877 hash_table *temps)
878 {
879 this->target = target;
880 this->symbols = target->symbols;
881 this->instructions = target->ir;
882 this->temps = temps;
883 }
884
885 virtual ir_visitor_status visit(ir_dereference_variable *ir)
886 {
887 if (ir->var->mode == ir_var_temporary) {
888 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
889
890 assert(var != NULL);
891 ir->var = var;
892 return visit_continue;
893 }
894
895 ir_variable *const existing =
896 this->symbols->get_variable(ir->var->name);
897 if (existing != NULL)
898 ir->var = existing;
899 else {
900 ir_variable *copy = ir->var->clone(this->target, NULL);
901
902 this->symbols->add_variable(copy);
903 this->instructions->push_head(copy);
904 ir->var = copy;
905 }
906
907 return visit_continue;
908 }
909
910 private:
911 struct gl_shader *target;
912 glsl_symbol_table *symbols;
913 exec_list *instructions;
914 hash_table *temps;
915 };
916
917 remap_visitor v(target, temps);
918
919 inst->accept(&v);
920 }
921
922
923 /**
924 * Move non-declarations from one instruction stream to another
925 *
926 * The intended usage pattern of this function is to pass the pointer to the
927 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
928 * pointer) for \c last and \c false for \c make_copies on the first
929 * call. Successive calls pass the return value of the previous call for
930 * \c last and \c true for \c make_copies.
931 *
932 * \param instructions Source instruction stream
933 * \param last Instruction after which new instructions should be
934 * inserted in the target instruction stream
935 * \param make_copies Flag selecting whether instructions in \c instructions
936 * should be copied (via \c ir_instruction::clone) into the
937 * target list or moved.
938 *
939 * \return
940 * The new "last" instruction in the target instruction stream. This pointer
941 * is suitable for use as the \c last parameter of a later call to this
942 * function.
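 *
 * A sketch of the intended call pattern (mirroring its use in
 * link_intrastage_shaders() below; other_shader stands in for each
 * remaining shader of the stage):
 *
 *    exec_node *insertion_point =
 *       move_non_declarations(linked->ir, (exec_node *) &main_sig->body,
 *                             false, linked);
 *    insertion_point = move_non_declarations(other_shader->ir,
 *                                            insertion_point, true, linked);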
943 */
944 exec_node *
945 move_non_declarations(exec_list *instructions, exec_node *last,
946 bool make_copies, gl_shader *target)
947 {
948 hash_table *temps = NULL;
949
950 if (make_copies)
951 temps = hash_table_ctor(0, hash_table_pointer_hash,
952 hash_table_pointer_compare);
953
954 foreach_list_safe(node, instructions) {
955 ir_instruction *inst = (ir_instruction *) node;
956
957 if (inst->as_function())
958 continue;
959
960 ir_variable *var = inst->as_variable();
961 if ((var != NULL) && (var->mode != ir_var_temporary))
962 continue;
963
964 assert(inst->as_assignment()
965 || inst->as_call()
966 || inst->as_if() /* for initializers with the ?: operator */
967 || ((var != NULL) && (var->mode == ir_var_temporary)));
968
969 if (make_copies) {
970 inst = inst->clone(target, NULL);
971
972 if (var != NULL)
973 hash_table_insert(temps, inst, var);
974 else
975 remap_variables(inst, target, temps);
976 } else {
977 inst->remove();
978 }
979
980 last->insert_after(inst);
981 last = inst;
982 }
983
984 if (make_copies)
985 hash_table_dtor(temps);
986
987 return last;
988 }
989
990 /**
991 * Get the function signature for main from a shader
992 */
993 static ir_function_signature *
994 get_main_function_signature(gl_shader *sh)
995 {
996 ir_function *const f = sh->symbols->get_function("main");
997 if (f != NULL) {
998 exec_list void_parameters;
999
1000 /* Look for the 'void main()' signature and ensure that it's defined.
1001        * This keeps the linker from accidentally picking a shader that just
1002 * contains a prototype for main.
1003 *
1004 * We don't have to check for multiple definitions of main (in multiple
1005 * shaders) because that would have already been caught above.
1006 */
1007 ir_function_signature *sig = f->matching_signature(NULL, &void_parameters);
1008 if ((sig != NULL) && sig->is_defined) {
1009 return sig;
1010 }
1011 }
1012
1013 return NULL;
1014 }
1015
1016
1017 /**
1018 * This class is only used in link_intrastage_shaders() below but declaring
1019 * it inside that function leads to compiler warnings with some versions of
1020 * gcc.
1021 */
1022 class array_sizing_visitor : public ir_hierarchical_visitor {
1023 public:
1024 array_sizing_visitor()
1025 : mem_ctx(ralloc_context(NULL)),
1026 unnamed_interfaces(hash_table_ctor(0, hash_table_pointer_hash,
1027 hash_table_pointer_compare))
1028 {
1029 }
1030
1031 ~array_sizing_visitor()
1032 {
1033 hash_table_dtor(this->unnamed_interfaces);
1034 ralloc_free(this->mem_ctx);
1035 }
1036
1037 virtual ir_visitor_status visit(ir_variable *var)
1038 {
1039 fixup_type(&var->type, var->max_array_access);
1040 if (var->type->is_interface()) {
1041 if (interface_contains_unsized_arrays(var->type)) {
1042 const glsl_type *new_type =
1043 resize_interface_members(var->type, var->max_ifc_array_access);
1044 var->type = new_type;
1045 var->change_interface_type(new_type);
1046 }
1047 } else if (var->type->is_array() &&
1048 var->type->fields.array->is_interface()) {
1049 if (interface_contains_unsized_arrays(var->type->fields.array)) {
1050 const glsl_type *new_type =
1051 resize_interface_members(var->type->fields.array,
1052 var->max_ifc_array_access);
1053 var->change_interface_type(new_type);
1054 var->type =
1055 glsl_type::get_array_instance(new_type, var->type->length);
1056 }
1057 } else if (const glsl_type *ifc_type = var->get_interface_type()) {
1058 /* Store a pointer to the variable in the unnamed_interfaces
1059 * hashtable.
1060 */
1061 ir_variable **interface_vars = (ir_variable **)
1062 hash_table_find(this->unnamed_interfaces, ifc_type);
1063 if (interface_vars == NULL) {
1064 interface_vars = rzalloc_array(mem_ctx, ir_variable *,
1065 ifc_type->length);
1066 hash_table_insert(this->unnamed_interfaces, interface_vars,
1067 ifc_type);
1068 }
1069 unsigned index = ifc_type->field_index(var->name);
1070 assert(index < ifc_type->length);
1071 assert(interface_vars[index] == NULL);
1072 interface_vars[index] = var;
1073 }
1074 return visit_continue;
1075 }
1076
1077 /**
1078 * For each unnamed interface block that was discovered while running the
1079 * visitor, adjust the interface type to reflect the newly assigned array
1080 * sizes, and fix up the ir_variable nodes to point to the new interface
1081 * type.
1082 */
1083 void fixup_unnamed_interface_types()
1084 {
1085 hash_table_call_foreach(this->unnamed_interfaces,
1086 fixup_unnamed_interface_type, NULL);
1087 }
1088
1089 private:
1090 /**
1091 * If the type pointed to by \c type represents an unsized array, replace
1092 * it with a sized array whose size is determined by max_array_access.
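 *
 * For example, an unsized array whose highest statically accessed index is
 * 3 (max_array_access == 3) is replaced by a sized array of length 4.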
1093 */
1094 static void fixup_type(const glsl_type **type, unsigned max_array_access)
1095 {
1096 if ((*type)->is_unsized_array()) {
1097 *type = glsl_type::get_array_instance((*type)->fields.array,
1098 max_array_access + 1);
1099 assert(*type != NULL);
1100 }
1101 }
1102
1103 /**
1104 * Determine whether the given interface type contains unsized arrays (if
1105 * it doesn't, array_sizing_visitor doesn't need to process it).
1106 */
1107 static bool interface_contains_unsized_arrays(const glsl_type *type)
1108 {
1109 for (unsigned i = 0; i < type->length; i++) {
1110 const glsl_type *elem_type = type->fields.structure[i].type;
1111 if (elem_type->is_unsized_array())
1112 return true;
1113 }
1114 return false;
1115 }
1116
1117 /**
1118 * Create a new interface type based on the given type, with unsized arrays
1119 * replaced by sized arrays whose size is determined by
1120 * max_ifc_array_access.
1121 */
1122 static const glsl_type *
1123 resize_interface_members(const glsl_type *type,
1124 const unsigned *max_ifc_array_access)
1125 {
1126 unsigned num_fields = type->length;
1127 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1128 memcpy(fields, type->fields.structure,
1129 num_fields * sizeof(*fields));
1130 for (unsigned i = 0; i < num_fields; i++) {
1131 fixup_type(&fields[i].type, max_ifc_array_access[i]);
1132 }
1133 glsl_interface_packing packing =
1134 (glsl_interface_packing) type->interface_packing;
1135 const glsl_type *new_ifc_type =
1136 glsl_type::get_interface_instance(fields, num_fields,
1137 packing, type->name);
1138 delete [] fields;
1139 return new_ifc_type;
1140 }
1141
1142 static void fixup_unnamed_interface_type(const void *key, void *data,
1143 void *)
1144 {
1145 const glsl_type *ifc_type = (const glsl_type *) key;
1146 ir_variable **interface_vars = (ir_variable **) data;
1147 unsigned num_fields = ifc_type->length;
1148 glsl_struct_field *fields = new glsl_struct_field[num_fields];
1149 memcpy(fields, ifc_type->fields.structure,
1150 num_fields * sizeof(*fields));
1151 bool interface_type_changed = false;
1152 for (unsigned i = 0; i < num_fields; i++) {
1153 if (interface_vars[i] != NULL &&
1154 fields[i].type != interface_vars[i]->type) {
1155 fields[i].type = interface_vars[i]->type;
1156 interface_type_changed = true;
1157 }
1158 }
1159 if (!interface_type_changed) {
1160 delete [] fields;
1161 return;
1162 }
1163 glsl_interface_packing packing =
1164 (glsl_interface_packing) ifc_type->interface_packing;
1165 const glsl_type *new_ifc_type =
1166 glsl_type::get_interface_instance(fields, num_fields, packing,
1167 ifc_type->name);
1168 delete [] fields;
1169 for (unsigned i = 0; i < num_fields; i++) {
1170 if (interface_vars[i] != NULL)
1171 interface_vars[i]->change_interface_type(new_ifc_type);
1172 }
1173 }
1174
1175 /**
1176 * Memory context used to allocate the data in \c unnamed_interfaces.
1177 */
1178 void *mem_ctx;
1179
1180 /**
1181 * Hash table from const glsl_type * to an array of ir_variable *'s
1182 * pointing to the ir_variables constituting each unnamed interface block.
1183 */
1184 hash_table *unnamed_interfaces;
1185 };
1186
1187 /**
1188 * Performs the cross-validation of geometry shader max_vertices and
1189 * primitive type layout qualifiers for the attached geometry shaders,
1190 * and propagates them to the linked GS and linked shader program.
1191 */
1192 static void
1193 link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
1194 struct gl_shader *linked_shader,
1195 struct gl_shader **shader_list,
1196 unsigned num_shaders)
1197 {
1198 linked_shader->Geom.VerticesOut = 0;
1199 linked_shader->Geom.InputType = PRIM_UNKNOWN;
1200 linked_shader->Geom.OutputType = PRIM_UNKNOWN;
1201
1202 /* No in/out qualifiers defined for anything but GLSL 1.50+
1203 * geometry shaders so far.
1204 */
1205 if (linked_shader->Type != GL_GEOMETRY_SHADER || prog->Version < 150)
1206 return;
1207
1208 /* From the GLSL 1.50 spec, page 46:
1209 *
1210 * "All geometry shader output layout declarations in a program
1211 * must declare the same layout and same value for
1212 * max_vertices. There must be at least one geometry output
1213 * layout declaration somewhere in a program, but not all
1214 * geometry shaders (compilation units) are required to
1215 * declare it."
1216 */
1217
1218 for (unsigned i = 0; i < num_shaders; i++) {
1219 struct gl_shader *shader = shader_list[i];
1220
1221 if (shader->Geom.InputType != PRIM_UNKNOWN) {
1222 if (linked_shader->Geom.InputType != PRIM_UNKNOWN &&
1223 linked_shader->Geom.InputType != shader->Geom.InputType) {
1224 linker_error(prog, "geometry shader defined with conflicting "
1225 "input types\n");
1226 return;
1227 }
1228 linked_shader->Geom.InputType = shader->Geom.InputType;
1229 }
1230
1231 if (shader->Geom.OutputType != PRIM_UNKNOWN) {
1232 if (linked_shader->Geom.OutputType != PRIM_UNKNOWN &&
1233 linked_shader->Geom.OutputType != shader->Geom.OutputType) {
1234 linker_error(prog, "geometry shader defined with conflicting "
1235 "output types\n");
1236 return;
1237 }
1238 linked_shader->Geom.OutputType = shader->Geom.OutputType;
1239 }
1240
1241 if (shader->Geom.VerticesOut != 0) {
1242 if (linked_shader->Geom.VerticesOut != 0 &&
1243 linked_shader->Geom.VerticesOut != shader->Geom.VerticesOut) {
1244 linker_error(prog, "geometry shader defined with conflicting "
1245 "output vertex count (%d and %d)\n",
1246 linked_shader->Geom.VerticesOut,
1247 shader->Geom.VerticesOut);
1248 return;
1249 }
1250 linked_shader->Geom.VerticesOut = shader->Geom.VerticesOut;
1251 }
1252 }
1253
1254 /* Just do the intrastage -> interstage propagation right now,
1255 * since we already know we're in the right type of shader program
1256 * for doing it.
1257 */
1258 if (linked_shader->Geom.InputType == PRIM_UNKNOWN) {
1259 linker_error(prog,
1260 "geometry shader didn't declare primitive input type\n");
1261 return;
1262 }
1263 prog->Geom.InputType = linked_shader->Geom.InputType;
1264
1265 if (linked_shader->Geom.OutputType == PRIM_UNKNOWN) {
1266 linker_error(prog,
1267 "geometry shader didn't declare primitive output type\n");
1268 return;
1269 }
1270 prog->Geom.OutputType = linked_shader->Geom.OutputType;
1271
1272 if (linked_shader->Geom.VerticesOut == 0) {
1273 linker_error(prog,
1274 "geometry shader didn't declare max_vertices\n");
1275 return;
1276 }
1277 prog->Geom.VerticesOut = linked_shader->Geom.VerticesOut;
1278 }
1279
1280 /**
1281 * Combine a group of shaders for a single stage to generate a linked shader
1282 *
1283 * \note
1284 * If this function is supplied a single shader, it is cloned, and the new
1285 * shader is returned.
1286 */
1287 static struct gl_shader *
1288 link_intrastage_shaders(void *mem_ctx,
1289 struct gl_context *ctx,
1290 struct gl_shader_program *prog,
1291 struct gl_shader **shader_list,
1292 unsigned num_shaders)
1293 {
1294 struct gl_uniform_block *uniform_blocks = NULL;
1295
1296 /* Check that global variables defined in multiple shaders are consistent.
1297 */
1298 cross_validate_globals(prog, shader_list, num_shaders, false);
1299 if (!prog->LinkStatus)
1300 return NULL;
1301
1302 /* Check that interface blocks defined in multiple shaders are consistent.
1303 */
1304 validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
1305 num_shaders);
1306 if (!prog->LinkStatus)
1307 return NULL;
1308
1309 /* Link up uniform blocks defined within this stage. */
1310 const unsigned num_uniform_blocks =
1311 link_uniform_blocks(mem_ctx, prog, shader_list, num_shaders,
1312 &uniform_blocks);
1313
1314 /* Check that there is only a single definition of each function signature
1315 * across all shaders.
1316 */
1317 for (unsigned i = 0; i < (num_shaders - 1); i++) {
1318 foreach_list(node, shader_list[i]->ir) {
1319 ir_function *const f = ((ir_instruction *) node)->as_function();
1320
1321 if (f == NULL)
1322 continue;
1323
1324 for (unsigned j = i + 1; j < num_shaders; j++) {
1325 ir_function *const other =
1326 shader_list[j]->symbols->get_function(f->name);
1327
1328 /* If the other shader has no function (and therefore no function
1329 * signatures) with the same name, skip to the next shader.
1330 */
1331 if (other == NULL)
1332 continue;
1333
1334 foreach_iter (exec_list_iterator, iter, *f) {
1335 ir_function_signature *sig =
1336 (ir_function_signature *) iter.get();
1337
1338 if (!sig->is_defined || sig->is_builtin())
1339 continue;
1340
1341 ir_function_signature *other_sig =
1342 other->exact_matching_signature(NULL, &sig->parameters);
1343
1344 if ((other_sig != NULL) && other_sig->is_defined
1345 && !other_sig->is_builtin()) {
1346 linker_error(prog, "function `%s' is multiply defined",
1347 f->name);
1348 return NULL;
1349 }
1350 }
1351 }
1352 }
1353 }
1354
1355 /* Find the shader that defines main, and make a clone of it.
1356 *
1357 * Starting with the clone, search for undefined references. If one is
1358 * found, find the shader that defines it. Clone the reference and add
1359 * it to the shader. Repeat until there are no undefined references or
1360 * until a reference cannot be resolved.
1361 */
1362 gl_shader *main = NULL;
1363 for (unsigned i = 0; i < num_shaders; i++) {
1364 if (get_main_function_signature(shader_list[i]) != NULL) {
1365 main = shader_list[i];
1366 break;
1367 }
1368 }
1369
1370 if (main == NULL) {
1371 linker_error(prog, "%s shader lacks `main'\n",
1372 _mesa_glsl_shader_target_name(shader_list[0]->Type));
1373 return NULL;
1374 }
1375
1376 gl_shader *linked = ctx->Driver.NewShader(NULL, 0, main->Type);
1377 linked->ir = new(linked) exec_list;
1378 clone_ir_list(mem_ctx, linked->ir, main->ir);
1379
1380 linked->UniformBlocks = uniform_blocks;
1381 linked->NumUniformBlocks = num_uniform_blocks;
1382 ralloc_steal(linked, linked->UniformBlocks);
1383
1384 link_gs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
1385
1386 populate_symbol_table(linked);
1387
1388    /* Get a pointer to the main function in the final linked shader (i.e., the
1389 * copy of the original shader that contained the main function).
1390 */
1391 ir_function_signature *const main_sig = get_main_function_signature(linked);
1392
1393 /* Move any instructions other than variable declarations or function
1394 * declarations into main.
1395 */
1396 exec_node *insertion_point =
1397 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
1398 linked);
1399
1400 for (unsigned i = 0; i < num_shaders; i++) {
1401 if (shader_list[i] == main)
1402 continue;
1403
1404 insertion_point = move_non_declarations(shader_list[i]->ir,
1405 insertion_point, true, linked);
1406 }
1407
1408 /* Check if any shader needs built-in functions. */
1409 bool need_builtins = false;
1410 for (unsigned i = 0; i < num_shaders; i++) {
1411 if (shader_list[i]->uses_builtin_functions) {
1412 need_builtins = true;
1413 break;
1414 }
1415 }
1416
1417 bool ok;
1418 if (need_builtins) {
1419 /* Make a temporary array one larger than shader_list, which will hold
1420 * the built-in function shader as well.
1421 */
1422 gl_shader **linking_shaders = (gl_shader **)
1423 calloc(num_shaders + 1, sizeof(gl_shader *));
1424 memcpy(linking_shaders, shader_list, num_shaders * sizeof(gl_shader *));
1425 linking_shaders[num_shaders] = _mesa_glsl_get_builtin_function_shader();
1426
1427 ok = link_function_calls(prog, linked, linking_shaders, num_shaders + 1);
1428
1429 free(linking_shaders);
1430 } else {
1431 ok = link_function_calls(prog, linked, shader_list, num_shaders);
1432 }
1433
1434
1435 if (!ok) {
1436 ctx->Driver.DeleteShader(ctx, linked);
1437 return NULL;
1438 }
1439
1440 /* At this point linked should contain all of the linked IR, so
1441 * validate it to make sure nothing went wrong.
1442 */
1443 validate_ir_tree(linked->ir);
1444
1445 /* Set the size of geometry shader input arrays */
1446 if (linked->Type == GL_GEOMETRY_SHADER) {
1447 unsigned num_vertices = vertices_per_prim(prog->Geom.InputType);
1448 geom_array_resize_visitor input_resize_visitor(num_vertices, prog);
1449 foreach_iter(exec_list_iterator, iter, *linked->ir) {
1450 ir_instruction *ir = (ir_instruction *)iter.get();
1451 ir->accept(&input_resize_visitor);
1452 }
1453 }
1454
1455 /* Make a pass over all variable declarations to ensure that arrays with
1456 * unspecified sizes have a size specified. The size is inferred from the
1457 * max_array_access field.
1458 */
1459 array_sizing_visitor v;
1460 v.run(linked->ir);
1461 v.fixup_unnamed_interface_types();
1462
1463 return linked;
1464 }
1465
1466 /**
1467 * Update the sizes of linked shader uniform arrays to the maximum
1468 * array index used.
1469 *
1470 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
1471 *
1472 * If one or more elements of an array are active,
1473 * GetActiveUniform will return the name of the array in name,
1474 * subject to the restrictions listed above. The type of the array
1475 * is returned in type. The size parameter contains the highest
1476 * array element index used, plus one. The compiler or linker
1477 * determines the highest index used. There will be only one
1478 * active uniform reported by the GL per uniform array.
1479
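 * For example (a hypothetical declaration): a uniform declared as
 * `float u[8]' whose highest accessed element in any linked stage is u[2]
 * is resized to a 3-element array; built-in uniforms backed by
 * fixed-function state have their state-slot counts scaled to match.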
1480 */
1481 static void
1482 update_array_sizes(struct gl_shader_program *prog)
1483 {
1484 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1485 if (prog->_LinkedShaders[i] == NULL)
1486 continue;
1487
1488 foreach_list(node, prog->_LinkedShaders[i]->ir) {
1489 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1490
1491 if ((var == NULL) || (var->mode != ir_var_uniform) ||
1492 !var->type->is_array())
1493 continue;
1494
1495 /* GL_ARB_uniform_buffer_object says that std140 uniforms
1496 * will not be eliminated. Since we always do std140, just
1497 * don't resize arrays in UBOs.
1498 *
1499 * Atomic counters are supposed to get deterministic
1500 * locations assigned based on the declaration ordering and
1501 * sizes, array compaction would mess that up.
1502 */
1503 if (var->is_in_uniform_block() || var->type->contains_atomic())
1504 continue;
1505
1506 unsigned int size = var->max_array_access;
1507 for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1508 if (prog->_LinkedShaders[j] == NULL)
1509 continue;
1510
1511 foreach_list(node2, prog->_LinkedShaders[j]->ir) {
1512 ir_variable *other_var = ((ir_instruction *) node2)->as_variable();
1513 if (!other_var)
1514 continue;
1515
1516 if (strcmp(var->name, other_var->name) == 0 &&
1517 other_var->max_array_access > size) {
1518 size = other_var->max_array_access;
1519 }
1520 }
1521 }
1522
1523 if (size + 1 != var->type->length) {
1524 /* If this is a built-in uniform (i.e., it's backed by some
1525 * fixed-function state), adjust the number of state slots to
1526 * match the new array size. The number of slots per array entry
1527 * is not known. It seems safe to assume that the total number of
1528 * slots is an integer multiple of the number of array elements.
1529 * Determine the number of slots per array element by dividing by
1530 * the old (total) size.
1531 */
1532 if (var->num_state_slots > 0) {
1533 var->num_state_slots = (size + 1)
1534 * (var->num_state_slots / var->type->length);
1535 }
1536
1537 var->type = glsl_type::get_array_instance(var->type->fields.array,
1538 size + 1);
1539 /* FINISHME: We should update the types of array
1540 * dereferences of this variable now.
1541 */
1542 }
1543 }
1544 }
1545 }
1546
1547 /**
1548 * Find a contiguous set of available bits in a bitmask.
1549 *
1550 * \param used_mask Bits representing used (1) and unused (0) locations
1551 * \param needed_count Number of contiguous bits needed.
1552 *
1553 * \return
1554 * Base location of the available bits on success or -1 on failure.
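 *
 * For example, find_available_slots(0x7, 2) returns 3: bits 0-2 are already
 * in use, so the first two contiguous free bits are bits 3 and 4.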
1555 */
1556 int
1557 find_available_slots(unsigned used_mask, unsigned needed_count)
1558 {
1559 unsigned needed_mask = (1 << needed_count) - 1;
1560 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
1561
1562 /* The comparison to 32 is redundant, but without it GCC emits "warning:
1563 * cannot optimize possibly infinite loops" for the loop below.
1564 */
1565 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
1566 return -1;
1567
1568 for (int i = 0; i <= max_bit_to_test; i++) {
1569 if ((needed_mask & ~used_mask) == needed_mask)
1570 return i;
1571
1572 needed_mask <<= 1;
1573 }
1574
1575 return -1;
1576 }
1577
1578
1579 /**
1580  * Assign locations for either VS inputs or FS outputs
1581 *
1582 * \param prog Shader program whose variables need locations assigned
1583 * \param target_index Selector for the program target to receive location
1584  *                      assignments.  Must be either \c MESA_SHADER_VERTEX or
1585 * \c MESA_SHADER_FRAGMENT.
1586 * \param max_index Maximum number of generic locations. This corresponds
1587 * to either the maximum number of draw buffers or the
1588 * maximum number of generic attributes.
1589 *
1590 * \return
1591 * If locations are successfully assigned, true is returned. Otherwise an
1592 * error is emitted to the shader link log and false is returned.
1593 */
1594 bool
1595 assign_attribute_or_color_locations(gl_shader_program *prog,
1596 unsigned target_index,
1597 unsigned max_index)
1598 {
1599 /* Mark invalid locations as being used.
1600 */
1601 unsigned used_locations = (max_index >= 32)
1602 ? ~0 : ~((1 << max_index) - 1);
1603
1604 assert((target_index == MESA_SHADER_VERTEX)
1605 || (target_index == MESA_SHADER_FRAGMENT));
1606
1607 gl_shader *const sh = prog->_LinkedShaders[target_index];
1608 if (sh == NULL)
1609 return true;
1610
1611 /* Operate in a total of four passes.
1612 *
1613 * 1. Invalidate the location assignments for all vertex shader inputs.
1614 *
1615 * 2. Assign locations for inputs that have user-defined (via
1616 * glBindVertexAttribLocation) locations and outputs that have
1617 * user-defined locations (via glBindFragDataLocation).
1618 *
1619 * 3. Sort the attributes without assigned locations by number of slots
1620 * required in decreasing order. Fragmentation caused by attribute
1621 * locations assigned by the application may prevent large attributes
1622 * from having enough contiguous space.
1623 *
1624 * 4. Assign locations to any inputs without assigned locations.
1625 */
1626
1627 const int generic_base = (target_index == MESA_SHADER_VERTEX)
1628 ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
1629
1630 const enum ir_variable_mode direction =
1631 (target_index == MESA_SHADER_VERTEX)
1632 ? ir_var_shader_in : ir_var_shader_out;
1633
1634
1635 /* Temporary storage for the set of attributes that need locations assigned.
1636 */
1637 struct temp_attr {
1638 unsigned slots;
1639 ir_variable *var;
1640
1641 /* Used below in the call to qsort. */
1642 static int compare(const void *a, const void *b)
1643 {
1644 const temp_attr *const l = (const temp_attr *) a;
1645 const temp_attr *const r = (const temp_attr *) b;
1646
1647 /* Reversed because we want a descending order sort below. */
1648 return r->slots - l->slots;
1649 }
1650 } to_assign[16];
1651
1652 unsigned num_attr = 0;
1653
1654 foreach_list(node, sh->ir) {
1655 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1656
1657 if ((var == NULL) || (var->mode != (unsigned) direction))
1658 continue;
1659
1660 if (var->explicit_location) {
1661 if ((var->location >= (int)(max_index + generic_base))
1662 || (var->location < 0)) {
1663 linker_error(prog,
1664 "invalid explicit location %d specified for `%s'\n",
1665 (var->location < 0)
1666 ? var->location : var->location - generic_base,
1667 var->name);
1668 return false;
1669 }
1670 } else if (target_index == MESA_SHADER_VERTEX) {
1671 unsigned binding;
1672
1673 if (prog->AttributeBindings->get(binding, var->name)) {
1674 assert(binding >= VERT_ATTRIB_GENERIC0);
1675 var->location = binding;
1676 var->is_unmatched_generic_inout = 0;
1677 }
1678 } else if (target_index == MESA_SHADER_FRAGMENT) {
1679 unsigned binding;
1680 unsigned index;
1681
1682 if (prog->FragDataBindings->get(binding, var->name)) {
1683 assert(binding >= FRAG_RESULT_DATA0);
1684 var->location = binding;
1685 var->is_unmatched_generic_inout = 0;
1686
1687 if (prog->FragDataIndexBindings->get(index, var->name)) {
1688 var->index = index;
1689 }
1690 }
1691 }
1692
1693 /* If the variable is not a built-in and has a location statically
1694 * assigned in the shader (presumably via a layout qualifier), make sure
1695 * that it doesn't collide with other assigned locations. Otherwise,
1696 * add it to the list of variables that need linker-assigned locations.
1697 */
1698 const unsigned slots = var->type->count_attribute_slots();
1699 if (var->location != -1) {
1700 if (var->location >= generic_base && var->index < 1) {
1701 /* From page 61 of the OpenGL 4.0 spec:
1702 *
1703 * "LinkProgram will fail if the attribute bindings assigned
1704 * by BindAttribLocation do not leave not enough space to
1705 * assign a location for an active matrix attribute or an
1706 * active attribute array, both of which require multiple
1707 * contiguous generic attributes."
1708 *
1709 * Previous versions of the spec contain similar language but omit
1710 * the bit about attribute arrays.
1711 *
1712 * Page 61 of the OpenGL 4.0 spec also says:
1713 *
1714 * "It is possible for an application to bind more than one
1715 * attribute name to the same location. This is referred to as
1716 * aliasing. This will only work if only one of the aliased
1717 * attributes is active in the executable program, or if no
1718 * path through the shader consumes more than one attribute of
1719 * a set of attributes aliased to the same location. A link
1720 * error can occur if the linker determines that every path
1721 * through the shader consumes multiple aliased attributes,
1722 * but implementations are not required to generate an error
1723 * in this case."
1724 *
1725 * These two paragraphs are either somewhat contradictory, or I
1726 * don't fully understand one or both of them.
1727 */
1728 /* FINISHME: The code as currently written does not support
1729 * FINISHME: attribute location aliasing (see comment above).
1730 */
1731 /* Mask representing the contiguous slots that will be used by
1732 * this attribute.
1733 */
1734 const unsigned attr = var->location - generic_base;
1735 const unsigned use_mask = (1 << slots) - 1;
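	    /* For example, an attribute that count_attribute_slots() says
	     * needs 4 slots (e.g. a mat4, one slot per column) at explicit
	     * location generic_base + 2 gives use_mask = 0xf, and the test
	     * below checks bits 2..5 of used_locations (0xf << 2 == 0x3c).
	     */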
1736
1737 /* Generate a link error if the set of bits requested for this
1738 * attribute overlaps any previously allocated bits.
1739 */
1740 if ((~(use_mask << attr) & used_locations) != used_locations) {
1741 const char *const string = (target_index == MESA_SHADER_VERTEX)
1742 ? "vertex shader input" : "fragment shader output";
1743 linker_error(prog,
1744 "insufficient contiguous locations "
1745 "available for %s `%s' %d %d %d", string,
1746 var->name, used_locations, use_mask, attr);
1747 return false;
1748 }
1749
1750 used_locations |= (use_mask << attr);
1751 }
1752
1753 continue;
1754 }
1755
1756 to_assign[num_attr].slots = slots;
1757 to_assign[num_attr].var = var;
1758 num_attr++;
1759 }
1760
1761 /* If all of the attributes were assigned locations by the application (or
1762 * are built-in attributes with fixed locations), return early. This should
1763 * be the common case.
1764 */
1765 if (num_attr == 0)
1766 return true;
1767
1768 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
1769
1770 if (target_index == MESA_SHADER_VERTEX) {
1771 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
1772       * only be explicitly assigned via glBindAttribLocation.  Mark it as
1773 * reserved to prevent it from being automatically allocated below.
1774 */
1775 find_deref_visitor find("gl_Vertex");
1776 find.run(sh->ir);
1777 if (find.variable_found())
1778 used_locations |= (1 << 0);
1779 }
1780
1781 for (unsigned i = 0; i < num_attr; i++) {
1782 /* Mask representing the contiguous slots that will be used by this
1783 * attribute.
1784 */
1785 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
1786
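      /* find_available_slots(), defined earlier in this file, presumably
       * returns the lowest index at which to_assign[i].slots contiguous bits
       * of used_locations are free, or a negative value if no such run
       * exists.
       */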
1787 int location = find_available_slots(used_locations, to_assign[i].slots);
1788
1789 if (location < 0) {
1790 const char *const string = (target_index == MESA_SHADER_VERTEX)
1791 ? "vertex shader input" : "fragment shader output";
1792
1793 linker_error(prog,
1794 "insufficient contiguous locations "
1795 "available for %s `%s'",
1796 string, to_assign[i].var->name);
1797 return false;
1798 }
1799
1800 to_assign[i].var->location = generic_base + location;
1801 to_assign[i].var->is_unmatched_generic_inout = 0;
1802 used_locations |= (use_mask << location);
1803 }
1804
1805 return true;
1806 }
1807
1808
1809 /**
1810 * Demote shader inputs and outputs that are not used in other stages
1811 */
1812 void
1813 demote_shader_inputs_and_outputs(gl_shader *sh, enum ir_variable_mode mode)
1814 {
1815 foreach_list(node, sh->ir) {
1816 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1817
1818 if ((var == NULL) || (var->mode != int(mode)))
1819 continue;
1820
1821       /* A shader 'in' or 'out' variable is only really an input or output if
1822        * its value is consumed or supplied by another shader stage; only then
1823        * will the variable have had a location assigned.
1824 */
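      /* Demoting an unmatched variable to ir_var_auto turns it into an
       * ordinary global; the dead-code passes run after demotion (see
       * link_shaders) can then eliminate it entirely if nothing else uses it.
       */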
1825 if (var->is_unmatched_generic_inout) {
1826 var->mode = ir_var_auto;
1827 }
1828 }
1829 }
1830
1831
1832 /**
1833 * Store the gl_FragDepth layout in the gl_shader_program struct.
1834 */
1835 static void
1836 store_fragdepth_layout(struct gl_shader_program *prog)
1837 {
1838 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
1839 return;
1840 }
1841
1842 struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
1843
1844 /* We don't look up the gl_FragDepth symbol directly because if
1845 * gl_FragDepth is not used in the shader, it's removed from the IR.
1846 * However, the symbol won't be removed from the symbol table.
1847 *
1848 * We're only interested in the cases where the variable is NOT removed
1849 * from the IR.
1850 */
1851 foreach_list(node, ir) {
1852 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1853
1854 if (var == NULL || var->mode != ir_var_shader_out) {
1855 continue;
1856 }
1857
1858 if (strcmp(var->name, "gl_FragDepth") == 0) {
1859 switch (var->depth_layout) {
1860 case ir_depth_layout_none:
1861 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
1862 return;
1863 case ir_depth_layout_any:
1864 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
1865 return;
1866 case ir_depth_layout_greater:
1867 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
1868 return;
1869 case ir_depth_layout_less:
1870 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
1871 return;
1872 case ir_depth_layout_unchanged:
1873 prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
1874 return;
1875 default:
1876 assert(0);
1877 return;
1878 }
1879 }
1880 }
1881 }
1882
1883 /**
1884 * Validate the resources used by a program versus the implementation limits
1885 */
1886 static void
1887 check_resources(struct gl_context *ctx, struct gl_shader_program *prog)
1888 {
1889 static const char *const shader_names[MESA_SHADER_TYPES] = {
1890 "vertex", "geometry", "fragment"
1891 };
1892
1893 const unsigned max_samplers[MESA_SHADER_TYPES] = {
1894 ctx->Const.VertexProgram.MaxTextureImageUnits,
1895 ctx->Const.GeometryProgram.MaxTextureImageUnits,
1896 ctx->Const.FragmentProgram.MaxTextureImageUnits
1897 };
1898
1899 const unsigned max_default_uniform_components[MESA_SHADER_TYPES] = {
1900 ctx->Const.VertexProgram.MaxUniformComponents,
1901 ctx->Const.GeometryProgram.MaxUniformComponents,
1902 ctx->Const.FragmentProgram.MaxUniformComponents
1903 };
1904
1905 const unsigned max_combined_uniform_components[MESA_SHADER_TYPES] = {
1906 ctx->Const.VertexProgram.MaxCombinedUniformComponents,
1907 ctx->Const.GeometryProgram.MaxCombinedUniformComponents,
1908 ctx->Const.FragmentProgram.MaxCombinedUniformComponents
1909 };
1910
1911 const unsigned max_uniform_blocks[MESA_SHADER_TYPES] = {
1912 ctx->Const.VertexProgram.MaxUniformBlocks,
1913 ctx->Const.GeometryProgram.MaxUniformBlocks,
1914 ctx->Const.FragmentProgram.MaxUniformBlocks
1915 };
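   /* Each of the per-stage arrays above is indexed by the MESA_SHADER_*
    * enum (vertex, geometry, fragment), the same order used for
    * shader_names and prog->_LinkedShaders below.
    */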
1916
1917 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1918 struct gl_shader *sh = prog->_LinkedShaders[i];
1919
1920 if (sh == NULL)
1921 continue;
1922
1923 if (sh->num_samplers > max_samplers[i]) {
1924 linker_error(prog, "Too many %s shader texture samplers",
1925 shader_names[i]);
1926 }
1927
1928 if (sh->num_uniform_components > max_default_uniform_components[i]) {
1929 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1930 linker_warning(prog, "Too many %s shader default uniform block "
1931 "components, but the driver will try to optimize "
1932 "them out; this is non-portable out-of-spec "
1933 "behavior\n",
1934 shader_names[i]);
1935 } else {
1936 linker_error(prog, "Too many %s shader default uniform block "
1937 "components",
1938 shader_names[i]);
1939 }
1940 }
1941
1942 if (sh->num_combined_uniform_components >
1943 max_combined_uniform_components[i]) {
1944 if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
1945 linker_warning(prog, "Too many %s shader uniform components, "
1946 "but the driver will try to optimize them out; "
1947 "this is non-portable out-of-spec behavior\n",
1948 shader_names[i]);
1949 } else {
1950 linker_error(prog, "Too many %s shader uniform components",
1951 shader_names[i]);
1952 }
1953 }
1954 }
1955
1956 unsigned blocks[MESA_SHADER_TYPES] = {0};
1957 unsigned total_uniform_blocks = 0;
1958
1959 for (unsigned i = 0; i < prog->NumUniformBlocks; i++) {
1960 for (unsigned j = 0; j < MESA_SHADER_TYPES; j++) {
1961 if (prog->UniformBlockStageIndex[j][i] != -1) {
1962 blocks[j]++;
1963 total_uniform_blocks++;
1964 }
1965 }
1966
1967 if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
1968 linker_error(prog, "Too many combined uniform blocks (%d/%d)",
1969 		   total_uniform_blocks,
1970 ctx->Const.MaxCombinedUniformBlocks);
1971 } else {
1972 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
1973 if (blocks[i] > max_uniform_blocks[i]) {
1974 linker_error(prog, "Too many %s uniform blocks (%d/%d)",
1975 shader_names[i],
1976 blocks[i],
1977 max_uniform_blocks[i]);
1978 break;
1979 }
1980 }
1981 }
1982 }
1983 }
1984
1985 void
1986 link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
1987 {
1988 tfeedback_decl *tfeedback_decls = NULL;
1989 unsigned num_tfeedback_decls = prog->TransformFeedback.NumVarying;
1990
1991 void *mem_ctx = ralloc_context(NULL); // temporary linker context
1992
1993 prog->LinkStatus = true; /* All error paths will set this to false */
1994 prog->Validated = false;
1995 prog->_Used = false;
1996
1997 ralloc_free(prog->InfoLog);
1998 prog->InfoLog = ralloc_strdup(NULL, "");
1999
2000 ralloc_free(prog->UniformBlocks);
2001 prog->UniformBlocks = NULL;
2002 prog->NumUniformBlocks = 0;
2003 for (int i = 0; i < MESA_SHADER_TYPES; i++) {
2004 ralloc_free(prog->UniformBlockStageIndex[i]);
2005 prog->UniformBlockStageIndex[i] = NULL;
2006 }
2007
2008 ralloc_free(prog->AtomicBuffers);
2009 prog->AtomicBuffers = NULL;
2010 prog->NumAtomicBuffers = 0;
2011
2012 /* Separate the shaders into groups based on their type.
2013 */
2014 struct gl_shader **vert_shader_list;
2015 unsigned num_vert_shaders = 0;
2016 struct gl_shader **frag_shader_list;
2017 unsigned num_frag_shaders = 0;
2018 struct gl_shader **geom_shader_list;
2019 unsigned num_geom_shaders = 0;
2020
2021 vert_shader_list = (struct gl_shader **)
2022 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2023 frag_shader_list = (struct gl_shader **)
2024 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2025 geom_shader_list = (struct gl_shader **)
2026 calloc(prog->NumShaders, sizeof(struct gl_shader *));
2027
2028 unsigned min_version = UINT_MAX;
2029 unsigned max_version = 0;
2030    const bool is_es_prog =
2031       (prog->NumShaders > 0 && prog->Shaders[0]->IsES);
2032 for (unsigned i = 0; i < prog->NumShaders; i++) {
2033 min_version = MIN2(min_version, prog->Shaders[i]->Version);
2034 max_version = MAX2(max_version, prog->Shaders[i]->Version);
2035
2036 if (prog->Shaders[i]->IsES != is_es_prog) {
2037 linker_error(prog, "all shaders must use same shading "
2038 "language version\n");
2039 goto done;
2040 }
2041
2042 switch (prog->Shaders[i]->Type) {
2043 case GL_VERTEX_SHADER:
2044 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
2045 num_vert_shaders++;
2046 break;
2047 case GL_FRAGMENT_SHADER:
2048 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
2049 num_frag_shaders++;
2050 break;
2051 case GL_GEOMETRY_SHADER:
2052 geom_shader_list[num_geom_shaders] = prog->Shaders[i];
2053 num_geom_shaders++;
2054 break;
2055 }
2056 }
2057
2058 /* In desktop GLSL, different shader versions may be linked together. In
2059 * GLSL ES, all shader versions must be the same.
2060 */
2061 if (is_es_prog && min_version != max_version) {
2062 linker_error(prog, "all shaders must use same shading "
2063 "language version\n");
2064 goto done;
2065 }
2066
2067 prog->Version = max_version;
2068 prog->IsES = is_es_prog;
2069
2070 /* Geometry shaders have to be linked with vertex shaders.
2071 */
2072 if (num_geom_shaders > 0 && num_vert_shaders == 0) {
2073 linker_error(prog, "Geometry shader must be linked with "
2074 "vertex shader\n");
2075 goto done;
2076 }
2077
2078 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2079 if (prog->_LinkedShaders[i] != NULL)
2080 ctx->Driver.DeleteShader(ctx, prog->_LinkedShaders[i]);
2081
2082 prog->_LinkedShaders[i] = NULL;
2083 }
2084
2085 /* Link all shaders for a particular stage and validate the result.
2086 */
2087 if (num_vert_shaders > 0) {
2088 gl_shader *const sh =
2089 link_intrastage_shaders(mem_ctx, ctx, prog, vert_shader_list,
2090 num_vert_shaders);
2091
2092 if (!prog->LinkStatus)
2093 goto done;
2094
2095 validate_vertex_shader_executable(prog, sh);
2096 if (!prog->LinkStatus)
2097 goto done;
2098 prog->LastClipDistanceArraySize = prog->Vert.ClipDistanceArraySize;
2099
2100 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_VERTEX],
2101 sh);
2102 }
2103
2104 if (num_frag_shaders > 0) {
2105 gl_shader *const sh =
2106 link_intrastage_shaders(mem_ctx, ctx, prog, frag_shader_list,
2107 num_frag_shaders);
2108
2109 if (!prog->LinkStatus)
2110 goto done;
2111
2112 validate_fragment_shader_executable(prog, sh);
2113 if (!prog->LinkStatus)
2114 goto done;
2115
2116 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_FRAGMENT],
2117 sh);
2118 }
2119
2120 if (num_geom_shaders > 0) {
2121 gl_shader *const sh =
2122 link_intrastage_shaders(mem_ctx, ctx, prog, geom_shader_list,
2123 num_geom_shaders);
2124
2125 if (!prog->LinkStatus)
2126 goto done;
2127
2128 validate_geometry_shader_executable(prog, sh);
2129 if (!prog->LinkStatus)
2130 goto done;
2131 prog->LastClipDistanceArraySize = prog->Geom.ClipDistanceArraySize;
2132
2133 _mesa_reference_shader(ctx, &prog->_LinkedShaders[MESA_SHADER_GEOMETRY],
2134 sh);
2135 }
2136
2137 /* Here begins the inter-stage linking phase. Some initial validation is
2138 * performed, then locations are assigned for uniforms, attributes, and
2139 * varyings.
2140 */
2141 cross_validate_uniforms(prog);
2142 if (!prog->LinkStatus)
2143 goto done;
2144
2145 unsigned prev;
2146
2147 for (prev = 0; prev < MESA_SHADER_TYPES; prev++) {
2148 if (prog->_LinkedShaders[prev] != NULL)
2149 break;
2150 }
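   /* prev now indexes the first pipeline stage present in the program; it
    * is advanced as each later stage is validated against the outputs of
    * the stage that precedes it.
    */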
2151
2152 /* Validate the inputs of each stage with the output of the preceding
2153 * stage.
2154 */
2155 for (unsigned i = prev + 1; i < MESA_SHADER_TYPES; i++) {
2156 if (prog->_LinkedShaders[i] == NULL)
2157 continue;
2158
2159 validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
2160 prog->_LinkedShaders[i]);
2161 if (!prog->LinkStatus)
2162 goto done;
2163
2164 cross_validate_outputs_to_inputs(prog,
2165 prog->_LinkedShaders[prev],
2166 prog->_LinkedShaders[i]);
2167 if (!prog->LinkStatus)
2168 goto done;
2169
2170 prev = i;
2171 }
2172
2173 /* Cross-validate uniform blocks between shader stages */
2174 validate_interstage_uniform_blocks(prog, prog->_LinkedShaders,
2175 MESA_SHADER_TYPES);
2176 if (!prog->LinkStatus)
2177 goto done;
2178
2179 for (unsigned int i = 0; i < MESA_SHADER_TYPES; i++) {
2180 if (prog->_LinkedShaders[i] != NULL)
2181 lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
2182 }
2183
2184    /* Implement the GLSL 1.30+ rule for discard vs. infinite loops.  Do
2185 * it before optimization because we want most of the checks to get
2186 * dropped thanks to constant propagation.
2187 *
2188 * This rule also applies to GLSL ES 3.00.
2189 */
2190 if (max_version >= (is_es_prog ? 300 : 130)) {
2191 struct gl_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
2192 if (sh) {
2193 lower_discard_flow(sh->ir);
2194 }
2195 }
2196
2197 if (!interstage_cross_validate_uniform_blocks(prog))
2198 goto done;
2199
2200 /* Do common optimization before assigning storage for attributes,
2201 * uniforms, and varyings. Later optimization could possibly make
2202 * some of that unused.
2203 */
2204 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
2205 if (prog->_LinkedShaders[i] == NULL)
2206 continue;
2207
2208 detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
2209 if (!prog->LinkStatus)
2210 goto done;
2211
2212 if (ctx->ShaderCompilerOptions[i].LowerClipDistance) {
2213 lower_clip_distance(prog->_LinkedShaders[i]);
2214 }
2215
2216 unsigned max_unroll = ctx->ShaderCompilerOptions[i].MaxUnrollIterations;
2217
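      /* Iterate the common optimization passes until they reach a fixed
       * point, i.e. until do_common_optimization() reports no more progress.
       */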
2218 while (do_common_optimization(prog->_LinkedShaders[i]->ir, true, false, max_unroll, &ctx->ShaderCompilerOptions[i]))
2219 ;
2220 }
2221
2222 /* Mark all generic shader inputs and outputs as unpaired. */
2223 if (prog->_LinkedShaders[MESA_SHADER_VERTEX] != NULL) {
2224 link_invalidate_variable_locations(
2225 prog->_LinkedShaders[MESA_SHADER_VERTEX]->ir);
2226 }
2227 if (prog->_LinkedShaders[MESA_SHADER_GEOMETRY] != NULL) {
2228 link_invalidate_variable_locations(
2229 prog->_LinkedShaders[MESA_SHADER_GEOMETRY]->ir);
2230 }
2231 if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] != NULL) {
2232 link_invalidate_variable_locations(
2233 prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir);
2234 }
2235
2236 /* FINISHME: The value of the max_attribute_index parameter is
2237 * FINISHME: implementation dependent based on the value of
2238 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
2239 * FINISHME: at least 16, so hardcode 16 for now.
2240 */
2241 if (!assign_attribute_or_color_locations(prog, MESA_SHADER_VERTEX, 16)) {
2242 goto done;
2243 }
2244
2245 if (!assign_attribute_or_color_locations(prog, MESA_SHADER_FRAGMENT, MAX2(ctx->Const.MaxDrawBuffers, ctx->Const.MaxDualSourceDrawBuffers))) {
2246 goto done;
2247 }
2248
2249 unsigned first;
2250 for (first = 0; first < MESA_SHADER_TYPES; first++) {
2251 if (prog->_LinkedShaders[first] != NULL)
2252 break;
2253 }
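   /* first is the earliest pipeline stage present in the program.  It is
    * used below to reject transform feedback without a vertex or geometry
    * shader and to detect a fragment-only program.
    */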
2254
2255 if (num_tfeedback_decls != 0) {
2256 /* From GL_EXT_transform_feedback:
2257 * A program will fail to link if:
2258 *
2259 * * the <count> specified by TransformFeedbackVaryingsEXT is
2260 * non-zero, but the program object has no vertex or geometry
2261 * shader;
2262 */
2263 if (first == MESA_SHADER_FRAGMENT) {
2264 linker_error(prog, "Transform feedback varyings specified, but "
2265 "no vertex or geometry shader is present.");
2266 goto done;
2267 }
2268
2269 tfeedback_decls = ralloc_array(mem_ctx, tfeedback_decl,
2270 prog->TransformFeedback.NumVarying);
2271 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2272 prog->TransformFeedback.VaryingNames,
2273 tfeedback_decls))
2274 goto done;
2275 }
2276
2277 /* Linking the stages in the opposite order (from fragment to vertex)
2278 * ensures that inter-shader outputs written to in an earlier stage are
2279 * eliminated if they are (transitively) not used in a later stage.
2280 */
2281 int last, next;
2282 for (last = MESA_SHADER_TYPES-1; last >= 0; last--) {
2283 if (prog->_LinkedShaders[last] != NULL)
2284 break;
2285 }
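   /* last now indexes the final pipeline stage present (the fragment shader
    * when one exists).
    */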
2286
2287 if (last >= 0 && last < MESA_SHADER_FRAGMENT) {
2288 gl_shader *const sh = prog->_LinkedShaders[last];
2289
2290 if (num_tfeedback_decls != 0) {
2291 /* There was no fragment shader, but we still have to assign varying
2292 * locations for use by transform feedback.
2293 */
2294 if (!assign_varying_locations(ctx, mem_ctx, prog,
2295 sh, NULL,
2296 num_tfeedback_decls, tfeedback_decls,
2297 0))
2298 goto done;
2299 }
2300
2301 do_dead_builtin_varyings(ctx, sh, NULL,
2302 num_tfeedback_decls, tfeedback_decls);
2303
2304 demote_shader_inputs_and_outputs(sh, ir_var_shader_out);
2305
2306 /* Eliminate code that is now dead due to unused outputs being demoted.
2307 */
2308 while (do_dead_code(sh->ir, false))
2309 ;
2310 }
2311 else if (first == MESA_SHADER_FRAGMENT) {
2312 /* If the program only contains a fragment shader...
2313 */
2314 gl_shader *const sh = prog->_LinkedShaders[first];
2315
2316 do_dead_builtin_varyings(ctx, NULL, sh,
2317 num_tfeedback_decls, tfeedback_decls);
2318
2319 demote_shader_inputs_and_outputs(sh, ir_var_shader_in);
2320
2321 while (do_dead_code(sh->ir, false))
2322 ;
2323 }
2324
2325 next = last;
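   /* Walk backwards from the last stage, pairing each linked stage with the
    * next linked stage that consumes its outputs; transform feedback
    * declarations are only passed in when the consumer is the fragment
    * shader.
    */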
2326 for (int i = next - 1; i >= 0; i--) {
2327 if (prog->_LinkedShaders[i] == NULL)
2328 continue;
2329
2330 gl_shader *const sh_i = prog->_LinkedShaders[i];
2331 gl_shader *const sh_next = prog->_LinkedShaders[next];
2332 unsigned gs_input_vertices =
2333 next == MESA_SHADER_GEOMETRY ? prog->Geom.VerticesIn : 0;
2334
2335 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2336 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2337 tfeedback_decls, gs_input_vertices))
2338 goto done;
2339
2340 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2341 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2342 tfeedback_decls);
2343
2344 demote_shader_inputs_and_outputs(sh_i, ir_var_shader_out);
2345 demote_shader_inputs_and_outputs(sh_next, ir_var_shader_in);
2346
2347 /* Eliminate code that is now dead due to unused outputs being demoted.
2348 */
2349 while (do_dead_code(sh_i->ir, false))
2350 ;
2351 while (do_dead_code(sh_next->ir, false))
2352 ;
2353
2354 /* This must be done after all dead varyings are eliminated. */
2355 if (!check_against_output_limit(ctx, prog, sh_i))
2356 goto done;
2357 if (!check_against_input_limit(ctx, prog, sh_next))
2358 goto done;
2359
2360 next = i;
2361 }
2362
2363 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls))
2364 goto done;
2365
2366 update_array_sizes(prog);
2367 link_assign_uniform_locations(prog);
2368 link_assign_atomic_counter_resources(ctx, prog);
2369 store_fragdepth_layout(prog);
2370
2371 check_resources(ctx, prog);
2372 link_check_atomic_counter_resources(ctx, prog);
2373
2374 if (!prog->LinkStatus)
2375 goto done;
2376
2377 /* OpenGL ES requires that a vertex shader and a fragment shader both be
2378 * present in a linked program. By checking prog->IsES, we also
2379 * catch the GL_ARB_ES2_compatibility case.
2380 */
2381 if (!prog->InternalSeparateShader &&
2382 (ctx->API == API_OPENGLES2 || prog->IsES)) {
2383 if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
2384 linker_error(prog, "program lacks a vertex shader\n");
2385 } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
2386 linker_error(prog, "program lacks a fragment shader\n");
2387 }
2388 }
2389
2390 /* FINISHME: Assign fragment shader output locations. */
2391
2392 done:
2393 free(vert_shader_list);
2394 free(frag_shader_list);
2395 free(geom_shader_list);
2396
2397 for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
2398 if (prog->_LinkedShaders[i] == NULL)
2399 continue;
2400
2401 /* Do a final validation step to make sure that the IR wasn't
2402 * invalidated by any modifications performed after intrastage linking.
2403 */
2404 validate_ir_tree(prog->_LinkedShaders[i]->ir);
2405
2406 /* Retain any live IR, but trash the rest. */
2407 reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
2408
2409 /* The symbol table in the linked shaders may contain references to
2410 * variables that were removed (e.g., unused uniforms). Since it may
2411 * contain junk, there is no possible valid use. Delete it and set the
2412 * pointer to NULL.
2413 */
2414 delete prog->_LinkedShaders[i]->symbols;
2415 prog->_LinkedShaders[i]->symbols = NULL;
2416 }
2417
2418 ralloc_free(mem_ctx);
2419 }