glsl2: Move the compiler to the subdirectory it will live in in Mesa.
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
 * - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66 #include <cstdlib>
67 #include <cstdio>
68 #include <cstdarg>
69
70 extern "C" {
71 #include <talloc.h>
72 }
73
74 #include "main/mtypes.h"
75 #include "glsl_symbol_table.h"
76 #include "glsl_parser_extras.h"
77 #include "ir.h"
78 #include "ir_optimization.h"
79 #include "program.h"
80 #include "hash_table.h"
81
82 /**
83 * Visitor that determines whether or not a variable is ever written.
84 */
85 class find_assignment_visitor : public ir_hierarchical_visitor {
86 public:
87 find_assignment_visitor(const char *name)
88 : name(name), found(false)
89 {
90 /* empty */
91 }
92
93 virtual ir_visitor_status visit_enter(ir_assignment *ir)
94 {
95 ir_variable *const var = ir->lhs->variable_referenced();
96
97 if (strcmp(name, var->name) == 0) {
98 found = true;
99 return visit_stop;
100 }
101
102 return visit_continue_with_parent;
103 }
104
105 bool variable_found()
106 {
107 return found;
108 }
109
110 private:
111 const char *name; /**< Find writes to a variable with this name. */
112 bool found; /**< Was a write to the variable found? */
113 };
114
115
116 void
117 linker_error_printf(glsl_program *prog, const char *fmt, ...)
118 {
119 va_list ap;
120
121 prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: ");
122 va_start(ap, fmt);
123 prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap);
124 va_end(ap);
125 }
126
127
128 void
129 invalidate_variable_locations(glsl_shader *sh, enum ir_variable_mode mode,
130 int generic_base)
131 {
132 foreach_list(node, &sh->ir) {
133 ir_variable *const var = ((ir_instruction *) node)->as_variable();
134
135 if ((var == NULL) || (var->mode != (unsigned) mode))
136 continue;
137
138 /* Only assign locations for generic attributes / varyings / etc.
139 */
140 if (var->location >= generic_base)
141 var->location = -1;
142 }
143 }
144
145
146 /**
147 * Determine the number of attribute slots required for a particular type
148 *
149 * This code is here because it implements the language rules of a specific
150 * GLSL version. Since it's a property of the language and not a property of
151 * types in general, it doesn't really belong in glsl_type.
152 */
153 unsigned
154 count_attribute_slots(const glsl_type *t)
155 {
156 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
157 *
158 * "A scalar input counts the same amount against this limit as a vec4,
159 * so applications may want to consider packing groups of four
160 * unrelated float inputs together into a vector to better utilize the
161 * capabilities of the underlying hardware. A matrix input will use up
162 * multiple locations. The number of locations used will equal the
163 * number of columns in the matrix."
164 *
165 * The spec does not explicitly say how arrays are counted. However, it
166 * should be safe to assume the total number of slots consumed by an array
167 * is the number of entries in the array multiplied by the number of slots
168 * consumed by a single element of the array.
169 */
170
171 if (t->is_array())
172 return t->array_size() * count_attribute_slots(t->element_type());
173
174 if (t->is_matrix())
175 return t->matrix_columns;
176
177 return 1;
178 }
179
180
181 /**
182 * Verify that a vertex shader executable meets all semantic requirements
183 *
184 * \param shader Vertex shader executable to be verified
185 */
186 bool
187 validate_vertex_shader_executable(struct glsl_program *prog,
188 struct glsl_shader *shader)
189 {
190 if (shader == NULL)
191 return true;
192
193 if (!shader->symbols->get_function("main")) {
194 linker_error_printf(prog, "vertex shader lacks `main'\n");
195 return false;
196 }
197
198 find_assignment_visitor find("gl_Position");
199 find.run(&shader->ir);
200 if (!find.variable_found()) {
201 linker_error_printf(prog,
202 "vertex shader does not write to `gl_Position'\n");
203 return false;
204 }
205
206 return true;
207 }
208
209
210 /**
211 * Verify that a fragment shader executable meets all semantic requirements
212 *
213 * \param shader Fragment shader executable to be verified
214 */
215 bool
216 validate_fragment_shader_executable(struct glsl_program *prog,
217 struct glsl_shader *shader)
218 {
219 if (shader == NULL)
220 return true;
221
222 if (!shader->symbols->get_function("main")) {
223 linker_error_printf(prog, "fragment shader lacks `main'\n");
224 return false;
225 }
226
227 find_assignment_visitor frag_color("gl_FragColor");
228 find_assignment_visitor frag_data("gl_FragData");
229
230 frag_color.run(&shader->ir);
231 frag_data.run(&shader->ir);
232
233 if (!frag_color.variable_found() && !frag_data.variable_found()) {
234 linker_error_printf(prog, "fragment shader does not write to "
235 "`gl_FragColor' or `gl_FragData'\n");
236 return false;
237 }
238
239 if (frag_color.variable_found() && frag_data.variable_found()) {
240 linker_error_printf(prog, "fragment shader writes to both "
241 "`gl_FragColor' and `gl_FragData'\n");
242 return false;
243 }
244
245 return true;
246 }
247
248
/**
 * Perform validation of uniforms used across multiple shader stages
 *
 * Uniforms sharing a name across linked shaders must have identical types,
 * and where more than one declaration supplies an initializer, the
 * initializer values must match.
 *
 * \return true on success, false (with an info-log message) on mismatch.
 */
bool
cross_validate_uniforms(struct glsl_program *prog)
{
   /* Examine all of the uniforms in all of the shaders and cross validate
    * them.
    */
   glsl_symbol_table uniforms;
   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
      foreach_list(node, &prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform))
            continue;

         /* If a uniform with this name has already been seen, verify that the
          * new instance has the same type.  In addition, if the uniforms have
          * initializers, the values of the initializers must be the same.
          */
         ir_variable *const existing = uniforms.get_variable(var->name);
         if (existing != NULL) {
            if (var->type != existing->type) {
               linker_error_printf(prog, "uniform `%s' declared as type "
                                   "`%s' and type `%s'\n",
                                   var->name, var->type->name,
                                   existing->type->name);
               return false;
            }

            if (var->constant_value != NULL) {
               if (existing->constant_value != NULL) {
                  if (!var->constant_value->has_value(existing->constant_value)) {
                     linker_error_printf(prog, "initializers for uniform "
                                         "`%s' have differing values\n",
                                         var->name);
                     return false;
                  }
               } else
                  /* If the first-seen instance of a particular uniform did not
                   * have an initializer but a later instance does, copy the
                   * initializer to the version stored in the symbol table.
                   *
                   * NOTE(review): clone(NULL) allocates on a NULL talloc
                   * context — presumably freed along with the rest of the
                   * linked IR; verify ownership.
                   */
                  existing->constant_value =
                     (ir_constant *)var->constant_value->clone(NULL);
            }
         } else
            uniforms.add_variable(var->name, var);
      }
   }

   return true;
}
303
304
305 /**
306 * Validate that outputs from one stage match inputs of another
307 */
308 bool
309 cross_validate_outputs_to_inputs(struct glsl_program *prog,
310 glsl_shader *producer, glsl_shader *consumer)
311 {
312 glsl_symbol_table parameters;
313 /* FINISHME: Figure these out dynamically. */
314 const char *const producer_stage = "vertex";
315 const char *const consumer_stage = "fragment";
316
317 /* Find all shader outputs in the "producer" stage.
318 */
319 foreach_list(node, &producer->ir) {
320 ir_variable *const var = ((ir_instruction *) node)->as_variable();
321
322 /* FINISHME: For geometry shaders, this should also look for inout
323 * FINISHME: variables.
324 */
325 if ((var == NULL) || (var->mode != ir_var_out))
326 continue;
327
328 parameters.add_variable(var->name, var);
329 }
330
331
332 /* Find all shader inputs in the "consumer" stage. Any variables that have
333 * matching outputs already in the symbol table must have the same type and
334 * qualifiers.
335 */
336 foreach_list(node, &consumer->ir) {
337 ir_variable *const input = ((ir_instruction *) node)->as_variable();
338
339 /* FINISHME: For geometry shaders, this should also look for inout
340 * FINISHME: variables.
341 */
342 if ((input == NULL) || (input->mode != ir_var_in))
343 continue;
344
345 ir_variable *const output = parameters.get_variable(input->name);
346 if (output != NULL) {
347 /* Check that the types match between stages.
348 */
349 if (input->type != output->type) {
350 linker_error_printf(prog,
351 "%s shader output `%s' delcared as "
352 "type `%s', but %s shader input declared "
353 "as type `%s'\n",
354 producer_stage, output->name,
355 output->type->name,
356 consumer_stage, input->type->name);
357 return false;
358 }
359
360 /* Check that all of the qualifiers match between stages.
361 */
362 if (input->centroid != output->centroid) {
363 linker_error_printf(prog,
364 "%s shader output `%s' %s centroid qualifier, "
365 "but %s shader input %s centroid qualifier\n",
366 producer_stage,
367 output->name,
368 (output->centroid) ? "has" : "lacks",
369 consumer_stage,
370 (input->centroid) ? "has" : "lacks");
371 return false;
372 }
373
374 if (input->invariant != output->invariant) {
375 linker_error_printf(prog,
376 "%s shader output `%s' %s invariant qualifier, "
377 "but %s shader input %s invariant qualifier\n",
378 producer_stage,
379 output->name,
380 (output->invariant) ? "has" : "lacks",
381 consumer_stage,
382 (input->invariant) ? "has" : "lacks");
383 return false;
384 }
385
386 if (input->interpolation != output->interpolation) {
387 linker_error_printf(prog,
388 "%s shader output `%s' specifies %s "
389 "interpolation qualifier, "
390 "but %s shader input specifies %s "
391 "interpolation qualifier\n",
392 producer_stage,
393 output->name,
394 output->interpolation_string(),
395 consumer_stage,
396 input->interpolation_string());
397 return false;
398 }
399 }
400 }
401
402 return true;
403 }
404
405
/**
 * Temporary list node used while building the program's uniform list.
 */
struct uniform_node {
   exec_node link;        /**< Linkage into the list of uniforms seen so far. */
   struct gl_uniform *u;  /**< Array of \c slots gl_uniform entries. */
   unsigned slots;        /**< Number of vec4 slots the uniform occupies. */
};
411
/**
 * Assign locations to every uniform in the linked program.
 *
 * Builds a unified, de-duplicated list of uniforms across all linked
 * shaders, assigns each uniform a per-stage position counted in vec4-sized
 * slots, and stores the resulting gl_uniform_list in \c prog->Uniforms.
 */
void
assign_uniform_locations(struct glsl_program *prog)
{
   exec_list uniforms;
   unsigned total_uniforms = 0;
   hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
                                    hash_table_string_compare);

   for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
      /* Positions are counted separately within each stage. */
      unsigned next_position = 0;

      foreach_list(node, &prog->_LinkedShaders[i]->ir) {
         ir_variable *const var = ((ir_instruction *) node)->as_variable();

         if ((var == NULL) || (var->mode != ir_var_uniform))
            continue;

         /* Round the component count up to a whole number of vec4 slots. */
         const unsigned vec4_slots = (var->component_slots() + 3) / 4;
         assert(vec4_slots != 0);

         uniform_node *n = (uniform_node *) hash_table_find(ht, var->name);
         if (n == NULL) {
            /* First sighting of this uniform in any stage: create its record.
             * NOTE(review): calloc/strdup results are not checked for NULL.
             */
            n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
            n->u = (gl_uniform *) calloc(vec4_slots, sizeof(struct gl_uniform));
            n->slots = vec4_slots;

            /* All slots of a multi-slot uniform share one name pointer;
             * only n->u[0].Name owns the allocation.
             */
            n->u[0].Name = strdup(var->name);
            for (unsigned j = 1; j < vec4_slots; j++)
               n->u[j].Name = n->u[0].Name;

            hash_table_insert(ht, n, n->u[0].Name);
            uniforms.push_tail(& n->link);
            total_uniforms += vec4_slots;
         }

         if (var->constant_value != NULL)
            for (unsigned j = 0; j < vec4_slots; j++)
               n->u[j].Initialized = true;

         var->location = next_position;

         /* Record the stage-specific position of every slot of the uniform. */
         for (unsigned j = 0; j < vec4_slots; j++) {
            switch (prog->_LinkedShaders[i]->Type) {
            case GL_VERTEX_SHADER:
               n->u[j].VertPos = next_position;
               break;
            case GL_FRAGMENT_SHADER:
               n->u[j].FragPos = next_position;
               break;
            case GL_GEOMETRY_SHADER:
               /* FINISHME: Support geometry shaders. */
               assert(prog->_LinkedShaders[i]->Type != GL_GEOMETRY_SHADER);
               break;
            }

            next_position++;
         }
      }
   }

   /* Flatten the per-uniform records into the contiguous array that the
    * rest of Mesa expects, draining the temporary list as we go.
    */
   gl_uniform_list *ul = (gl_uniform_list *)
      calloc(1, sizeof(gl_uniform_list));

   ul->Size = total_uniforms;
   ul->NumUniforms = total_uniforms;
   ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));

   unsigned idx = 0;
   uniform_node *next;
   for (uniform_node *node = (uniform_node *) uniforms.head
           ; node->link.next != NULL
           ; node = next) {
      next = (uniform_node *) node->link.next;

      node->link.remove();
      memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform) * node->slots);
      idx += node->slots;

      /* The gl_uniform entries (including the Name pointer) now live in
       * ul->Uniforms; only the temporary wrappers are freed here.
       */
      free(node->u);
      free(node);
   }

   hash_table_dtor(ht);

   prog->Uniforms = ul;
}
499
500
/**
 * Find a contiguous set of available bits in a bitmask
 *
 * \param used_mask     Bits representing used (1) and unused (0) locations
 * \param needed_count  Number of contiguous bits needed.
 *
 * \return
 * Base location of the available bits on success or -1 on failure.
 */
int
find_available_slots(unsigned used_mask, unsigned needed_count)
{
   /* Validate the request before computing the mask: the original code
    * evaluated (1 << needed_count) first, which is undefined behavior for
    * needed_count >= 31 (signed shift into / past the sign bit).  The
    * comparison against the mask width also keeps GCC from emitting
    * "warning: cannot optimize possibly infinite loops" for the loop below.
    */
   const unsigned mask_width = 8 * sizeof(used_mask);

   if ((needed_count == 0) || (needed_count > mask_width))
      return -1;

   const int max_bit_to_test = mask_width - needed_count;

   /* Unsigned arithmetic, with the full-width case handled explicitly, so
    * every shift below is well-defined.
    */
   unsigned needed_mask = (needed_count == mask_width)
      ? ~0u : (1u << needed_count) - 1;

   /* Slide the window of needed bits across the mask until it fits into a
    * run of unused locations.
    */
   for (int i = 0; i <= max_bit_to_test; i++) {
      if ((needed_mask & ~used_mask) == needed_mask)
         return i;

      needed_mask <<= 1;
   }

   return -1;
}
531
532
533 bool
534 assign_attribute_locations(glsl_program *prog, unsigned max_attribute_index)
535 {
536 /* Mark invalid attribute locations as being used.
537 */
538 unsigned used_locations = (max_attribute_index >= 32)
539 ? ~0 : ~((1 << max_attribute_index) - 1);
540
541 glsl_shader *const sh = prog->_LinkedShaders[0];
542 assert(sh->Type == GL_VERTEX_SHADER);
543
544 /* Operate in a total of four passes.
545 *
546 * 1. Invalidate the location assignments for all vertex shader inputs.
547 *
548 * 2. Assign locations for inputs that have user-defined (via
549 * glBindVertexAttribLocation) locatoins.
550 *
551 * 3. Sort the attributes without assigned locations by number of slots
552 * required in decreasing order. Fragmentation caused by attribute
553 * locations assigned by the application may prevent large attributes
554 * from having enough contiguous space.
555 *
556 * 4. Assign locations to any inputs without assigned locations.
557 */
558
559 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
560
561 if (prog->Attributes != NULL) {
562 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
563 ir_variable *const var =
564 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
565
566 /* Note: attributes that occupy multiple slots, such as arrays or
567 * matrices, may appear in the attrib array multiple times.
568 */
569 if ((var == NULL) || (var->location != -1))
570 continue;
571
572 /* From page 61 of the OpenGL 4.0 spec:
573 *
574 * "LinkProgram will fail if the attribute bindings assigned by
575 * BindAttribLocation do not leave not enough space to assign a
576 * location for an active matrix attribute or an active attribute
577 * array, both of which require multiple contiguous generic
578 * attributes."
579 *
580 * Previous versions of the spec contain similar language but omit the
581 * bit about attribute arrays.
582 *
583 * Page 61 of the OpenGL 4.0 spec also says:
584 *
585 * "It is possible for an application to bind more than one
586 * attribute name to the same location. This is referred to as
587 * aliasing. This will only work if only one of the aliased
588 * attributes is active in the executable program, or if no path
589 * through the shader consumes more than one attribute of a set
590 * of attributes aliased to the same location. A link error can
591 * occur if the linker determines that every path through the
592 * shader consumes multiple aliased attributes, but
593 * implementations are not required to generate an error in this
594 * case."
595 *
596 * These two paragraphs are either somewhat contradictory, or I don't
597 * fully understand one or both of them.
598 */
599 /* FINISHME: The code as currently written does not support attribute
600 * FINISHME: location aliasing (see comment above).
601 */
602 const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
603 const unsigned slots = count_attribute_slots(var->type);
604
605 /* Mask representing the contiguous slots that will be used by this
606 * attribute.
607 */
608 const unsigned use_mask = (1 << slots) - 1;
609
610 /* Generate a link error if the set of bits requested for this
611 * attribute overlaps any previously allocated bits.
612 */
613 if ((~(use_mask << attr) & used_locations) != used_locations) {
614 linker_error_printf(prog,
615 "insufficient contiguous attribute locations "
616 "available for vertex shader input `%s'",
617 var->name);
618 return false;
619 }
620
621 var->location = VERT_ATTRIB_GENERIC0 + attr;
622 used_locations |= (use_mask << attr);
623 }
624 }
625
626 /* Temporary storage for the set of attributes that need locations assigned.
627 */
628 struct temp_attr {
629 unsigned slots;
630 ir_variable *var;
631
632 /* Used below in the call to qsort. */
633 static int compare(const void *a, const void *b)
634 {
635 const temp_attr *const l = (const temp_attr *) a;
636 const temp_attr *const r = (const temp_attr *) b;
637
638 /* Reversed because we want a descending order sort below. */
639 return r->slots - l->slots;
640 }
641 } to_assign[16];
642
643 unsigned num_attr = 0;
644
645 foreach_list(node, &sh->ir) {
646 ir_variable *const var = ((ir_instruction *) node)->as_variable();
647
648 if ((var == NULL) || (var->mode != ir_var_in))
649 continue;
650
651 /* The location was explicitly assigned, nothing to do here.
652 */
653 if (var->location != -1)
654 continue;
655
656 to_assign[num_attr].slots = count_attribute_slots(var->type);
657 to_assign[num_attr].var = var;
658 num_attr++;
659 }
660
661 /* If all of the attributes were assigned locations by the application (or
662 * are built-in attributes with fixed locations), return early. This should
663 * be the common case.
664 */
665 if (num_attr == 0)
666 return true;
667
668 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
669
670 for (unsigned i = 0; i < num_attr; i++) {
671 /* Mask representing the contiguous slots that will be used by this
672 * attribute.
673 */
674 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
675
676 int location = find_available_slots(used_locations, to_assign[i].slots);
677
678 if (location < 0) {
679 linker_error_printf(prog,
680 "insufficient contiguous attribute locations "
681 "available for vertex shader input `%s'",
682 to_assign[i].var->name);
683 return false;
684 }
685
686 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
687 used_locations |= (use_mask << location);
688 }
689
690 return true;
691 }
692
693
694 void
695 assign_varying_locations(glsl_shader *producer, glsl_shader *consumer)
696 {
697 /* FINISHME: Set dynamically when geometry shader support is added. */
698 unsigned output_index = VERT_RESULT_VAR0;
699 unsigned input_index = FRAG_ATTRIB_VAR0;
700
701 /* Operate in a total of three passes.
702 *
703 * 1. Assign locations for any matching inputs and outputs.
704 *
705 * 2. Mark output variables in the producer that do not have locations as
706 * not being outputs. This lets the optimizer eliminate them.
707 *
708 * 3. Mark input variables in the consumer that do not have locations as
709 * not being inputs. This lets the optimizer eliminate them.
710 */
711
712 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
713 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
714
715 foreach_list(node, &producer->ir) {
716 ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
717
718 if ((output_var == NULL) || (output_var->mode != ir_var_out)
719 || (output_var->location != -1))
720 continue;
721
722 ir_variable *const input_var =
723 consumer->symbols->get_variable(output_var->name);
724
725 if ((input_var == NULL) || (input_var->mode != ir_var_in))
726 continue;
727
728 assert(input_var->location == -1);
729
730 /* FINISHME: Location assignment will need some changes when arrays,
731 * FINISHME: matrices, and structures are allowed as shader inputs /
732 * FINISHME: outputs.
733 */
734 output_var->location = output_index;
735 input_var->location = input_index;
736
737 output_index++;
738 input_index++;
739 }
740
741 foreach_list(node, &producer->ir) {
742 ir_variable *const var = ((ir_instruction *) node)->as_variable();
743
744 if ((var == NULL) || (var->mode != ir_var_out))
745 continue;
746
747 /* An 'out' variable is only really a shader output if its value is read
748 * by the following stage.
749 */
750 var->shader_out = (var->location != -1);
751 }
752
753 foreach_list(node, &consumer->ir) {
754 ir_variable *const var = ((ir_instruction *) node)->as_variable();
755
756 if ((var == NULL) || (var->mode != ir_var_in))
757 continue;
758
759 /* An 'in' variable is only really a shader input if its value is written
760 * by the previous stage.
761 */
762 var->shader_in = (var->location != -1);
763 }
764 }
765
766
/**
 * Link all of the shaders attached to \c prog into per-stage executables.
 *
 * On success \c prog->LinkStatus is set to true; on failure it remains
 * false and an explanation is appended to \c prog->InfoLog.
 */
void
link_shaders(struct glsl_program *prog)
{
   prog->LinkStatus = false;
   prog->Validated = false;
   prog->_Used = false;

   /* Start each link with a fresh, empty info log. */
   if (prog->InfoLog != NULL)
      talloc_free(prog->InfoLog);

   prog->InfoLog = talloc_strdup(NULL, "");

   /* Separate the shaders into groups based on their type.
    */
   struct glsl_shader **vert_shader_list;
   unsigned num_vert_shaders = 0;
   struct glsl_shader **frag_shader_list;
   unsigned num_frag_shaders = 0;

   /* One allocation backs both lists: entries [0, NumShaders) hold vertex
    * shaders and [NumShaders, 2*NumShaders) hold fragment shaders.  Unused
    * entries are NULL thanks to calloc.
    * NOTE(review): the calloc result is not checked for NULL.
    */
   vert_shader_list = (struct glsl_shader **)
      calloc(2 * prog->NumShaders, sizeof(struct glsl_shader *));
   frag_shader_list = &vert_shader_list[prog->NumShaders];

   for (unsigned i = 0; i < prog->NumShaders; i++) {
      switch (prog->Shaders[i]->Type) {
      case GL_VERTEX_SHADER:
	 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
	 num_vert_shaders++;
	 break;
      case GL_FRAGMENT_SHADER:
	 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
	 num_frag_shaders++;
	 break;
      case GL_GEOMETRY_SHADER:
	 /* FINISHME: Support geometry shaders. */
	 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
	 break;
      }
   }

   /* FINISHME: Implement intra-stage linking. */
   assert(num_vert_shaders <= 1);
   assert(num_frag_shaders <= 1);

   /* Verify that each of the per-target executables is valid.  A missing
    * stage passes NULL, which the validators accept.
    */
   if (!validate_vertex_shader_executable(prog, vert_shader_list[0])
       || !validate_fragment_shader_executable(prog, frag_shader_list[0]))
      goto done;


   prog->_LinkedShaders = (struct glsl_shader **)
      calloc(2, sizeof(struct glsl_shader *));
   prog->_NumLinkedShaders = 0;

   if (num_vert_shaders > 0) {
      prog->_LinkedShaders[prog->_NumLinkedShaders] = vert_shader_list[0];
      prog->_NumLinkedShaders++;
   }

   if (num_frag_shaders > 0) {
      prog->_LinkedShaders[prog->_NumLinkedShaders] = frag_shader_list[0];
      prog->_NumLinkedShaders++;
   }

   /* Here begins the inter-stage linking phase.  Some initial validation is
    * performed, then locations are assigned for uniforms, attributes, and
    * varyings.
    */
   if (cross_validate_uniforms(prog)) {
      /* Validate the inputs of each stage with the output of the preceding
       * stage.
       */
      for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) {
	 if (!cross_validate_outputs_to_inputs(prog,
					       prog->_LinkedShaders[i - 1],
					       prog->_LinkedShaders[i]))
	    goto done;
      }

      prog->LinkStatus = true;
   }

   /* FINISHME: Perform whole-program optimization here. */

   /* NOTE(review): location assignment below runs even when
    * cross_validate_uniforms failed (LinkStatus still false) — confirm this
    * is intentional.
    */
   assign_uniform_locations(prog);

   /* NOTE(review): assumes at least one linked shader exists; with zero
    * attached shaders prog->_LinkedShaders[0] would be NULL here.
    */
   if (prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER)
      /* FINISHME: The value of the max_attribute_index parameter is
       * FINISHME: implementation dependent based on the value of
       * FINISHME: GL_MAX_VERTEX_ATTRIBS.  GL_MAX_VERTEX_ATTRIBS must be
       * FINISHME: at least 16, so hardcode 16 for now.
       */
      if (!assign_attribute_locations(prog, 16))
	 goto done;

   for (unsigned i = 1; i < prog->_NumLinkedShaders; i++)
      assign_varying_locations(prog->_LinkedShaders[i - 1],
			       prog->_LinkedShaders[i]);

   /* FINISHME: Assign fragment shader output locations. */

done:
   free(vert_shader_list);
}