e70fa31a2b069f66af74d901134d4e9a35a7b2b1
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
 *   - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66 #include <cstdlib>
67 #include <cstdio>
68 #include <cstdarg>
69
70 extern "C" {
71 #include <talloc.h>
72 }
73
74 #include "main/mtypes.h"
75 #include "glsl_symbol_table.h"
76 #include "glsl_parser_extras.h"
77 #include "ir.h"
78 #include "ir_optimization.h"
79 #include "program.h"
80 #include "hash_table.h"
81 #include "shader_api.h"
82
83 /**
84 * Visitor that determines whether or not a variable is ever written.
85 */
86 class find_assignment_visitor : public ir_hierarchical_visitor {
87 public:
88 find_assignment_visitor(const char *name)
89 : name(name), found(false)
90 {
91 /* empty */
92 }
93
94 virtual ir_visitor_status visit_enter(ir_assignment *ir)
95 {
96 ir_variable *const var = ir->lhs->variable_referenced();
97
98 if (strcmp(name, var->name) == 0) {
99 found = true;
100 return visit_stop;
101 }
102
103 return visit_continue_with_parent;
104 }
105
106 bool variable_found()
107 {
108 return found;
109 }
110
111 private:
112 const char *name; /**< Find writes to a variable with this name. */
113 bool found; /**< Was a write to the variable found? */
114 };
115
116
/**
 * Append a printf-formatted error message to the program's info log.
 *
 * Each message is prefixed with "error: ".  The log string is grown in place
 * via talloc, so \c prog->InfoLog must already be a talloc-allocated string
 * (see link_shaders, which initializes it with talloc_strdup).
 */
void
linker_error_printf(gl_shader_program *prog, const char *fmt, ...)
{
   va_list ap;

   prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: ");
   va_start(ap, fmt);
   prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap);
   va_end(ap);
}
127
128
129 void
130 invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode,
131 int generic_base)
132 {
133 foreach_list(node, sh->ir) {
134 ir_variable *const var = ((ir_instruction *) node)->as_variable();
135
136 if ((var == NULL) || (var->mode != (unsigned) mode))
137 continue;
138
139 /* Only assign locations for generic attributes / varyings / etc.
140 */
141 if (var->location >= generic_base)
142 var->location = -1;
143 }
144 }
145
146
147 /**
148 * Determine the number of attribute slots required for a particular type
149 *
150 * This code is here because it implements the language rules of a specific
151 * GLSL version. Since it's a property of the language and not a property of
152 * types in general, it doesn't really belong in glsl_type.
153 */
154 unsigned
155 count_attribute_slots(const glsl_type *t)
156 {
157 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
158 *
159 * "A scalar input counts the same amount against this limit as a vec4,
160 * so applications may want to consider packing groups of four
161 * unrelated float inputs together into a vector to better utilize the
162 * capabilities of the underlying hardware. A matrix input will use up
163 * multiple locations. The number of locations used will equal the
164 * number of columns in the matrix."
165 *
166 * The spec does not explicitly say how arrays are counted. However, it
167 * should be safe to assume the total number of slots consumed by an array
168 * is the number of entries in the array multiplied by the number of slots
169 * consumed by a single element of the array.
170 */
171
172 if (t->is_array())
173 return t->array_size() * count_attribute_slots(t->element_type());
174
175 if (t->is_matrix())
176 return t->matrix_columns;
177
178 return 1;
179 }
180
181
182 /**
183 * Verify that a vertex shader executable meets all semantic requirements
184 *
185 * \param shader Vertex shader executable to be verified
186 */
187 bool
188 validate_vertex_shader_executable(struct gl_shader_program *prog,
189 struct gl_shader *shader)
190 {
191 if (shader == NULL)
192 return true;
193
194 if (!shader->symbols->get_function("main")) {
195 linker_error_printf(prog, "vertex shader lacks `main'\n");
196 return false;
197 }
198
199 find_assignment_visitor find("gl_Position");
200 find.run(shader->ir);
201 if (!find.variable_found()) {
202 linker_error_printf(prog,
203 "vertex shader does not write to `gl_Position'\n");
204 return false;
205 }
206
207 return true;
208 }
209
210
211 /**
212 * Verify that a fragment shader executable meets all semantic requirements
213 *
214 * \param shader Fragment shader executable to be verified
215 */
216 bool
217 validate_fragment_shader_executable(struct gl_shader_program *prog,
218 struct gl_shader *shader)
219 {
220 if (shader == NULL)
221 return true;
222
223 if (!shader->symbols->get_function("main")) {
224 linker_error_printf(prog, "fragment shader lacks `main'\n");
225 return false;
226 }
227
228 find_assignment_visitor frag_color("gl_FragColor");
229 find_assignment_visitor frag_data("gl_FragData");
230
231 frag_color.run(shader->ir);
232 frag_data.run(shader->ir);
233
234 if (frag_color.variable_found() && frag_data.variable_found()) {
235 linker_error_printf(prog, "fragment shader writes to both "
236 "`gl_FragColor' and `gl_FragData'\n");
237 return false;
238 }
239
240 return true;
241 }
242
243
244 /**
245 * Perform validation of uniforms used across multiple shader stages
246 */
247 bool
248 cross_validate_uniforms(struct gl_shader_program *prog)
249 {
250 /* Examine all of the uniforms in all of the shaders and cross validate
251 * them.
252 */
253 glsl_symbol_table uniforms;
254 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
255 foreach_list(node, prog->_LinkedShaders[i]->ir) {
256 ir_variable *const var = ((ir_instruction *) node)->as_variable();
257
258 if ((var == NULL) || (var->mode != ir_var_uniform))
259 continue;
260
261 /* If a uniform with this name has already been seen, verify that the
262 * new instance has the same type. In addition, if the uniforms have
263 * initializers, the values of the initializers must be the same.
264 */
265 ir_variable *const existing = uniforms.get_variable(var->name);
266 if (existing != NULL) {
267 if (var->type != existing->type) {
268 linker_error_printf(prog, "uniform `%s' declared as type "
269 "`%s' and type `%s'\n",
270 var->name, var->type->name,
271 existing->type->name);
272 return false;
273 }
274
275 if (var->constant_value != NULL) {
276 if (existing->constant_value != NULL) {
277 if (!var->constant_value->has_value(existing->constant_value)) {
278 linker_error_printf(prog, "initializers for uniform "
279 "`%s' have differing values\n",
280 var->name);
281 return false;
282 }
283 } else
284 /* If the first-seen instance of a particular uniform did not
285 * have an initializer but a later instance does, copy the
286 * initializer to the version stored in the symbol table.
287 */
288 existing->constant_value =
289 (ir_constant *)var->constant_value->clone(NULL);
290 }
291 } else
292 uniforms.add_variable(var->name, var);
293 }
294 }
295
296 return true;
297 }
298
299
300 /**
301 * Validate that outputs from one stage match inputs of another
302 */
303 bool
304 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
305 gl_shader *producer, gl_shader *consumer)
306 {
307 glsl_symbol_table parameters;
308 /* FINISHME: Figure these out dynamically. */
309 const char *const producer_stage = "vertex";
310 const char *const consumer_stage = "fragment";
311
312 /* Find all shader outputs in the "producer" stage.
313 */
314 foreach_list(node, producer->ir) {
315 ir_variable *const var = ((ir_instruction *) node)->as_variable();
316
317 /* FINISHME: For geometry shaders, this should also look for inout
318 * FINISHME: variables.
319 */
320 if ((var == NULL) || (var->mode != ir_var_out))
321 continue;
322
323 parameters.add_variable(var->name, var);
324 }
325
326
327 /* Find all shader inputs in the "consumer" stage. Any variables that have
328 * matching outputs already in the symbol table must have the same type and
329 * qualifiers.
330 */
331 foreach_list(node, consumer->ir) {
332 ir_variable *const input = ((ir_instruction *) node)->as_variable();
333
334 /* FINISHME: For geometry shaders, this should also look for inout
335 * FINISHME: variables.
336 */
337 if ((input == NULL) || (input->mode != ir_var_in))
338 continue;
339
340 ir_variable *const output = parameters.get_variable(input->name);
341 if (output != NULL) {
342 /* Check that the types match between stages.
343 */
344 if (input->type != output->type) {
345 linker_error_printf(prog,
346 "%s shader output `%s' delcared as "
347 "type `%s', but %s shader input declared "
348 "as type `%s'\n",
349 producer_stage, output->name,
350 output->type->name,
351 consumer_stage, input->type->name);
352 return false;
353 }
354
355 /* Check that all of the qualifiers match between stages.
356 */
357 if (input->centroid != output->centroid) {
358 linker_error_printf(prog,
359 "%s shader output `%s' %s centroid qualifier, "
360 "but %s shader input %s centroid qualifier\n",
361 producer_stage,
362 output->name,
363 (output->centroid) ? "has" : "lacks",
364 consumer_stage,
365 (input->centroid) ? "has" : "lacks");
366 return false;
367 }
368
369 if (input->invariant != output->invariant) {
370 linker_error_printf(prog,
371 "%s shader output `%s' %s invariant qualifier, "
372 "but %s shader input %s invariant qualifier\n",
373 producer_stage,
374 output->name,
375 (output->invariant) ? "has" : "lacks",
376 consumer_stage,
377 (input->invariant) ? "has" : "lacks");
378 return false;
379 }
380
381 if (input->interpolation != output->interpolation) {
382 linker_error_printf(prog,
383 "%s shader output `%s' specifies %s "
384 "interpolation qualifier, "
385 "but %s shader input specifies %s "
386 "interpolation qualifier\n",
387 producer_stage,
388 output->name,
389 output->interpolation_string(),
390 consumer_stage,
391 input->interpolation_string());
392 return false;
393 }
394 }
395 }
396
397 return true;
398 }
399
400
401 /**
402 * Populates a shaders symbol table with all global declarations
403 */
404 static void
405 populate_symbol_table(gl_shader *sh)
406 {
407 sh->symbols = new(sh) glsl_symbol_table;
408
409 foreach_list(node, sh->ir) {
410 ir_instruction *const inst = (ir_instruction *) node;
411 ir_variable *var;
412 ir_function *func;
413
414 if ((func = inst->as_function()) != NULL) {
415 sh->symbols->add_function(func->name, func);
416 } else if ((var = inst->as_variable()) != NULL) {
417 sh->symbols->add_variable(var->name, var);
418 }
419 }
420 }
421
422
423 /**
424 * Combine a group of shaders for a single stage to generate a linked shader
425 *
426 * \note
427 * If this function is supplied a single shader, it is cloned, and the new
428 * shader is returned.
429 */
430 static struct gl_shader *
431 link_intrastage_shaders(struct gl_shader_program *prog,
432 struct gl_shader **shader_list,
433 unsigned num_shaders)
434 {
435 (void) prog;
436 assert(num_shaders == 1);
437
438 gl_shader *const linked = _mesa_new_shader(NULL, 0, shader_list[0]->Type);
439 linked->ir = new(linked) exec_list;
440 clone_ir_list(linked->ir, shader_list[0]->ir);
441
442 populate_symbol_table(linked);
443
444 return linked;
445 }
446
447
/**
 * Temporary list node used while gathering uniforms during location
 * assignment (see assign_uniform_locations).
 */
struct uniform_node {
   exec_node link;        /**< Linkage into the per-program uniform list. */
   struct gl_uniform *u;  /**< Array of \c slots gl_uniform entries (malloc'd). */
   unsigned slots;        /**< Number of vec4 slots occupied by this uniform. */
};
453
/**
 * Assign locations to all uniforms in all linked shaders and build the
 * program's flat gl_uniform_list.
 *
 * Each shader stage gets its own location counter; a uniform shared by
 * several stages appears once in the final list but records a per-stage
 * position (VertPos / FragPos).  Sizes are counted in vec4-sized slots.
 */
void
assign_uniform_locations(struct gl_shader_program *prog)
{
   /* Uniforms are collected into a list (for stable ordering) and a hash
    * table keyed by name (for cross-stage de-duplication).
    */
   exec_list uniforms;
   unsigned total_uniforms = 0;
   hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
				    hash_table_string_compare);

   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
      /* Locations are assigned independently per stage. */
      unsigned next_position = 0;

      foreach_list(node, prog->_LinkedShaders[i]->ir) {
	 ir_variable *const var = ((ir_instruction *) node)->as_variable();

	 if ((var == NULL) || (var->mode != ir_var_uniform))
	    continue;

	 /* Round the component count up to whole vec4 slots. */
	 const unsigned vec4_slots = (var->component_slots() + 3) / 4;
	 assert(vec4_slots != 0);

	 uniform_node *n = (uniform_node *) hash_table_find(ht, var->name);
	 if (n == NULL) {
	    /* First sighting of this uniform name: allocate one gl_uniform
	     * per slot, all sharing the same strdup'd name.
	     */
	    n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
	    n->u = (gl_uniform *) calloc(vec4_slots, sizeof(struct gl_uniform));
	    n->slots = vec4_slots;

	    n->u[0].Name = strdup(var->name);
	    for (unsigned j = 1; j < vec4_slots; j++)
	       n->u[j].Name = n->u[0].Name;

	    hash_table_insert(ht, n, n->u[0].Name);
	    uniforms.push_tail(& n->link);
	    total_uniforms += vec4_slots;
	 }

	 /* Any stage's initializer marks every slot as initialized. */
	 if (var->constant_value != NULL)
	    for (unsigned j = 0; j < vec4_slots; j++)
	       n->u[j].Initialized = true;

	 var->location = next_position;

	 /* Record this stage's position for each slot of the uniform. */
	 for (unsigned j = 0; j < vec4_slots; j++) {
	    switch (prog->_LinkedShaders[i]->Type) {
	    case GL_VERTEX_SHADER:
	       n->u[j].VertPos = next_position;
	       break;
	    case GL_FRAGMENT_SHADER:
	       n->u[j].FragPos = next_position;
	       break;
	    case GL_GEOMETRY_SHADER:
	       /* FINISHME: Support geometry shaders. */
	       assert(prog->_LinkedShaders[i]->Type != GL_GEOMETRY_SHADER);
	       break;
	    }

	    next_position++;
	 }
      }
   }

   /* Flatten the collected uniforms into a single contiguous array.
    * NOTE(review): presumably ul is freed by the gl_shader_program teardown
    * elsewhere — confirm ownership before changing allocation here.
    */
   gl_uniform_list *ul = (gl_uniform_list *)
      calloc(1, sizeof(gl_uniform_list));

   ul->Size = total_uniforms;
   ul->NumUniforms = total_uniforms;
   ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));

   /* Walk the list, copying each node's slots into the flat array and
    * freeing the temporary nodes.  The `link.next != NULL` test stops at
    * the exec_list tail sentinel.
    */
   unsigned idx = 0;
   uniform_node *next;
   for (uniform_node *node = (uniform_node *) uniforms.head
	   ; node->link.next != NULL
	   ; node = next) {
      next = (uniform_node *) node->link.next;

      node->link.remove();
      memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform) * node->slots);
      idx += node->slots;

      free(node->u);
      free(node);
   }

   hash_table_dtor(ht);

   prog->Uniforms = ul;
}
541
542
/**
 * Find a contiguous set of available bits in a bitmask
 *
 * \param used_mask     Bits representing used (1) and unused (0) locations
 * \param needed_count  Number of contiguous bits needed.
 *
 * \return
 * Base location of the available bits on success or -1 on failure.
 */
int
find_available_slots(unsigned used_mask, unsigned needed_count)
{
   const unsigned total_bits = 8 * sizeof(used_mask);

   /* Rejecting out-of-range counts up front also keeps the subtraction
    * below from wrapping and bounds the loop for the compiler.
    */
   if ((needed_count == 0) || (needed_count > total_bits))
      return -1;

   /* Building the mask with a shift of `needed_count` would be undefined
    * behavior when needed_count equals the type width (1 << 32), so the
    * full-width case is handled explicitly.
    */
   unsigned needed_mask = (needed_count == total_bits)
      ? ~0u : (1u << needed_count) - 1;

   const int max_bit_to_test = total_bits - needed_count;

   for (int i = 0; i <= max_bit_to_test; i++) {
      if ((needed_mask & ~used_mask) == needed_mask)
	 return i;

      needed_mask <<= 1;
   }

   return -1;
}
573
574
575 bool
576 assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
577 {
578 /* Mark invalid attribute locations as being used.
579 */
580 unsigned used_locations = (max_attribute_index >= 32)
581 ? ~0 : ~((1 << max_attribute_index) - 1);
582
583 gl_shader *const sh = prog->_LinkedShaders[0];
584 assert(sh->Type == GL_VERTEX_SHADER);
585
586 /* Operate in a total of four passes.
587 *
588 * 1. Invalidate the location assignments for all vertex shader inputs.
589 *
590 * 2. Assign locations for inputs that have user-defined (via
591 * glBindVertexAttribLocation) locatoins.
592 *
593 * 3. Sort the attributes without assigned locations by number of slots
594 * required in decreasing order. Fragmentation caused by attribute
595 * locations assigned by the application may prevent large attributes
596 * from having enough contiguous space.
597 *
598 * 4. Assign locations to any inputs without assigned locations.
599 */
600
601 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
602
603 if (prog->Attributes != NULL) {
604 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
605 ir_variable *const var =
606 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
607
608 /* Note: attributes that occupy multiple slots, such as arrays or
609 * matrices, may appear in the attrib array multiple times.
610 */
611 if ((var == NULL) || (var->location != -1))
612 continue;
613
614 /* From page 61 of the OpenGL 4.0 spec:
615 *
616 * "LinkProgram will fail if the attribute bindings assigned by
617 * BindAttribLocation do not leave not enough space to assign a
618 * location for an active matrix attribute or an active attribute
619 * array, both of which require multiple contiguous generic
620 * attributes."
621 *
622 * Previous versions of the spec contain similar language but omit the
623 * bit about attribute arrays.
624 *
625 * Page 61 of the OpenGL 4.0 spec also says:
626 *
627 * "It is possible for an application to bind more than one
628 * attribute name to the same location. This is referred to as
629 * aliasing. This will only work if only one of the aliased
630 * attributes is active in the executable program, or if no path
631 * through the shader consumes more than one attribute of a set
632 * of attributes aliased to the same location. A link error can
633 * occur if the linker determines that every path through the
634 * shader consumes multiple aliased attributes, but
635 * implementations are not required to generate an error in this
636 * case."
637 *
638 * These two paragraphs are either somewhat contradictory, or I don't
639 * fully understand one or both of them.
640 */
641 /* FINISHME: The code as currently written does not support attribute
642 * FINISHME: location aliasing (see comment above).
643 */
644 const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
645 const unsigned slots = count_attribute_slots(var->type);
646
647 /* Mask representing the contiguous slots that will be used by this
648 * attribute.
649 */
650 const unsigned use_mask = (1 << slots) - 1;
651
652 /* Generate a link error if the set of bits requested for this
653 * attribute overlaps any previously allocated bits.
654 */
655 if ((~(use_mask << attr) & used_locations) != used_locations) {
656 linker_error_printf(prog,
657 "insufficient contiguous attribute locations "
658 "available for vertex shader input `%s'",
659 var->name);
660 return false;
661 }
662
663 var->location = VERT_ATTRIB_GENERIC0 + attr;
664 used_locations |= (use_mask << attr);
665 }
666 }
667
668 /* Temporary storage for the set of attributes that need locations assigned.
669 */
670 struct temp_attr {
671 unsigned slots;
672 ir_variable *var;
673
674 /* Used below in the call to qsort. */
675 static int compare(const void *a, const void *b)
676 {
677 const temp_attr *const l = (const temp_attr *) a;
678 const temp_attr *const r = (const temp_attr *) b;
679
680 /* Reversed because we want a descending order sort below. */
681 return r->slots - l->slots;
682 }
683 } to_assign[16];
684
685 unsigned num_attr = 0;
686
687 foreach_list(node, sh->ir) {
688 ir_variable *const var = ((ir_instruction *) node)->as_variable();
689
690 if ((var == NULL) || (var->mode != ir_var_in))
691 continue;
692
693 /* The location was explicitly assigned, nothing to do here.
694 */
695 if (var->location != -1)
696 continue;
697
698 to_assign[num_attr].slots = count_attribute_slots(var->type);
699 to_assign[num_attr].var = var;
700 num_attr++;
701 }
702
703 /* If all of the attributes were assigned locations by the application (or
704 * are built-in attributes with fixed locations), return early. This should
705 * be the common case.
706 */
707 if (num_attr == 0)
708 return true;
709
710 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
711
712 /* VERT_ATTRIB_GENERIC0 is a psdueo-alias for VERT_ATTRIB_POS. It can only
713 * be explicitly assigned by via glBindAttribLocation. Mark it as reserved
714 * to prevent it from being automatically allocated below.
715 */
716 used_locations |= (1 << 0);
717
718 for (unsigned i = 0; i < num_attr; i++) {
719 /* Mask representing the contiguous slots that will be used by this
720 * attribute.
721 */
722 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
723
724 int location = find_available_slots(used_locations, to_assign[i].slots);
725
726 if (location < 0) {
727 linker_error_printf(prog,
728 "insufficient contiguous attribute locations "
729 "available for vertex shader input `%s'",
730 to_assign[i].var->name);
731 return false;
732 }
733
734 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
735 used_locations |= (use_mask << location);
736 }
737
738 return true;
739 }
740
741
742 void
743 assign_varying_locations(gl_shader *producer, gl_shader *consumer)
744 {
745 /* FINISHME: Set dynamically when geometry shader support is added. */
746 unsigned output_index = VERT_RESULT_VAR0;
747 unsigned input_index = FRAG_ATTRIB_VAR0;
748
749 /* Operate in a total of three passes.
750 *
751 * 1. Assign locations for any matching inputs and outputs.
752 *
753 * 2. Mark output variables in the producer that do not have locations as
754 * not being outputs. This lets the optimizer eliminate them.
755 *
756 * 3. Mark input variables in the consumer that do not have locations as
757 * not being inputs. This lets the optimizer eliminate them.
758 */
759
760 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
761 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
762
763 foreach_list(node, producer->ir) {
764 ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
765
766 if ((output_var == NULL) || (output_var->mode != ir_var_out)
767 || (output_var->location != -1))
768 continue;
769
770 ir_variable *const input_var =
771 consumer->symbols->get_variable(output_var->name);
772
773 if ((input_var == NULL) || (input_var->mode != ir_var_in))
774 continue;
775
776 assert(input_var->location == -1);
777
778 /* FINISHME: Location assignment will need some changes when arrays,
779 * FINISHME: matrices, and structures are allowed as shader inputs /
780 * FINISHME: outputs.
781 */
782 output_var->location = output_index;
783 input_var->location = input_index;
784
785 output_index++;
786 input_index++;
787 }
788
789 foreach_list(node, producer->ir) {
790 ir_variable *const var = ((ir_instruction *) node)->as_variable();
791
792 if ((var == NULL) || (var->mode != ir_var_out))
793 continue;
794
795 /* An 'out' variable is only really a shader output if its value is read
796 * by the following stage.
797 */
798 var->shader_out = (var->location != -1);
799 }
800
801 foreach_list(node, consumer->ir) {
802 ir_variable *const var = ((ir_instruction *) node)->as_variable();
803
804 if ((var == NULL) || (var->mode != ir_var_in))
805 continue;
806
807 /* An 'in' variable is only really a shader input if its value is written
808 * by the previous stage.
809 */
810 var->shader_in = (var->location != -1);
811 }
812 }
813
814
/**
 * Link all shaders attached to a program into per-stage executables.
 *
 * Performs intra-stage linking for each stage present, validates each
 * resulting executable, cross-validates uniforms and stage interfaces,
 * and finally assigns uniform, attribute, and varying locations.  On any
 * failure \c prog->LinkStatus remains false and an explanation is left in
 * \c prog->InfoLog.
 */
void
link_shaders(struct gl_shader_program *prog)
{
   prog->LinkStatus = false;
   prog->Validated = false;
   prog->_Used = false;

   /* Reset the info log so it only contains messages from this link. */
   if (prog->InfoLog != NULL)
      talloc_free(prog->InfoLog);

   prog->InfoLog = talloc_strdup(NULL, "");

   /* Separate the shaders into groups based on their type.
    */
   struct gl_shader **vert_shader_list;
   unsigned num_vert_shaders = 0;
   struct gl_shader **frag_shader_list;
   unsigned num_frag_shaders = 0;

   /* One allocation backs both lists: vertex shaders in the first half,
    * fragment shaders in the second.
    */
   vert_shader_list = (struct gl_shader **)
      calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
   frag_shader_list = &vert_shader_list[prog->NumShaders];

   for (unsigned i = 0; i < prog->NumShaders; i++) {
      switch (prog->Shaders[i]->Type) {
      case GL_VERTEX_SHADER:
	 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
	 num_vert_shaders++;
	 break;
      case GL_FRAGMENT_SHADER:
	 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
	 num_frag_shaders++;
	 break;
      case GL_GEOMETRY_SHADER:
	 /* FINISHME: Support geometry shaders. */
	 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
	 break;
      }
   }

   /* FINISHME: Implement intra-stage linking. */
   prog->_NumLinkedShaders = 0;
   if (num_vert_shaders > 0) {
      gl_shader *const sh =
	 link_intrastage_shaders(prog, vert_shader_list, num_vert_shaders);

      if (sh == NULL)
	 goto done;

      if (!validate_vertex_shader_executable(prog, sh))
	 goto done;

      prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
      prog->_NumLinkedShaders++;
   }

   if (num_frag_shaders > 0) {
      gl_shader *const sh =
	 link_intrastage_shaders(prog, frag_shader_list, num_frag_shaders);

      if (sh == NULL)
	 goto done;

      if (!validate_fragment_shader_executable(prog, sh))
	 goto done;

      prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
      prog->_NumLinkedShaders++;
   }

   /* Here begins the inter-stage linking phase.  Some initial validation is
    * performed, then locations are assigned for uniforms, attributes, and
    * varyings.
    */
   if (cross_validate_uniforms(prog)) {
      /* Validate the inputs of each stage with the output of the preceding
       * stage.
       */
      for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) {
	 if (!cross_validate_outputs_to_inputs(prog,
					       prog->_LinkedShaders[i - 1],
					       prog->_LinkedShaders[i]))
	    goto done;
      }

      prog->LinkStatus = true;
   }

   /* NOTE(review): location assignment proceeds even when uniform
    * cross-validation above failed (LinkStatus stays false) — confirm this
    * is intentional rather than falling through to `done`.
    */
   /* FINISHME: Perform whole-program optimization here. */

   assign_uniform_locations(prog);

   /* NOTE(review): _LinkedShaders[0] is dereferenced here even when
    * _NumLinkedShaders == 0 (no vertex or fragment shaders attached) —
    * verify callers guarantee at least one shader.
    */
   if (prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER)
      /* FINISHME: The value of the max_attribute_index parameter is
       * FINISHME: implementation dependent based on the value of
       * FINISHME: GL_MAX_VERTEX_ATTRIBS.  GL_MAX_VERTEX_ATTRIBS must be
       * FINISHME: at least 16, so hardcode 16 for now.
       */
      if (!assign_attribute_locations(prog, 16))
	 goto done;

   for (unsigned i = 1; i < prog->_NumLinkedShaders; i++)
      assign_varying_locations(prog->_LinkedShaders[i - 1],
			       prog->_LinkedShaders[i]);

   /* FINISHME: Assign fragment shader output locations. */

done:
   free(vert_shader_list);
}