linker: Demote user-defined varyings in the VS-only case
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35 * - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66 #include <cstdlib>
67 #include <cstdio>
68 #include <cstdarg>
69 #include <climits>
70
71 extern "C" {
72 #include <talloc.h>
73 }
74
75 #include "main/compiler.h"
76 #include "main/mtypes.h"
77 #include "main/macros.h"
78 #include "main/shaderobj.h"
79 #include "glsl_symbol_table.h"
80 #include "ir.h"
81 #include "program.h"
82 #include "program/hash_table.h"
83 #include "linker.h"
84 #include "ir_optimization.h"
85
86 /**
87 * Visitor that determines whether or not a variable is ever written.
88 */
89 class find_assignment_visitor : public ir_hierarchical_visitor {
90 public:
91 find_assignment_visitor(const char *name)
92 : name(name), found(false)
93 {
94 /* empty */
95 }
96
97 virtual ir_visitor_status visit_enter(ir_assignment *ir)
98 {
99 ir_variable *const var = ir->lhs->variable_referenced();
100
101 if (strcmp(name, var->name) == 0) {
102 found = true;
103 return visit_stop;
104 }
105
106 return visit_continue_with_parent;
107 }
108
109 bool variable_found()
110 {
111 return found;
112 }
113
114 private:
115 const char *name; /**< Find writes to a variable with this name. */
116 bool found; /**< Was a write to the variable found? */
117 };
118
119
120 /**
121 * Visitor that determines whether or not a variable is ever read.
122 */
123 class find_deref_visitor : public ir_hierarchical_visitor {
124 public:
125 find_deref_visitor(const char *name)
126 : name(name), found(false)
127 {
128 /* empty */
129 }
130
131 virtual ir_visitor_status visit(ir_dereference_variable *ir)
132 {
133 if (strcmp(this->name, ir->var->name) == 0) {
134 this->found = true;
135 return visit_stop;
136 }
137
138 return visit_continue;
139 }
140
141 bool variable_found() const
142 {
143 return this->found;
144 }
145
146 private:
147 const char *name; /**< Find reads of a variable with this name. */
148 bool found; /**< Was a read of the variable found? */
149 };
150
151
152 void
153 linker_error_printf(gl_shader_program *prog, const char *fmt, ...)
154 {
155 va_list ap;
156
157 prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: ");
158 va_start(ap, fmt);
159 prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap);
160 va_end(ap);
161 }
162
163
164 void
165 invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode,
166 int generic_base)
167 {
168 foreach_list(node, sh->ir) {
169 ir_variable *const var = ((ir_instruction *) node)->as_variable();
170
171 if ((var == NULL) || (var->mode != (unsigned) mode))
172 continue;
173
174 /* Only invalidate the locations of generic attributes / varyings / etc.
175 */
176 if (var->location >= generic_base)
177 var->location = -1;
178 }
179 }
180
181
182 /**
183 * Determine the number of attribute slots required for a particular type
184 *
185 * This code is here because it implements the language rules of a specific
186 * GLSL version. Since it's a property of the language and not a property of
187 * types in general, it doesn't really belong in glsl_type.
188 */
189 unsigned
190 count_attribute_slots(const glsl_type *t)
191 {
192 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
193 *
194 * "A scalar input counts the same amount against this limit as a vec4,
195 * so applications may want to consider packing groups of four
196 * unrelated float inputs together into a vector to better utilize the
197 * capabilities of the underlying hardware. A matrix input will use up
198 * multiple locations. The number of locations used will equal the
199 * number of columns in the matrix."
200 *
201 * The spec does not explicitly say how arrays are counted. However, it
202 * should be safe to assume the total number of slots consumed by an array
203 * is the number of entries in the array multiplied by the number of slots
204 * consumed by a single element of the array.
205 */
206
207 if (t->is_array())
208 return t->array_size() * count_attribute_slots(t->element_type());
209
210 if (t->is_matrix())
211 return t->matrix_columns;
212
213 return 1;
214 }
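/* For example: under these rules a `mat4' input consumes 4 slots (one per
 * column), so an input declared `mat4[2]' consumes 2 * 4 = 8 attribute
 * slots, while a lone `float' or `vec4' consumes a single slot.
 */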
215
216
217 /**
218 * Verify that a vertex shader executable meets all semantic requirements
219 *
220 * \param shader Vertex shader executable to be verified
221 */
222 bool
223 validate_vertex_shader_executable(struct gl_shader_program *prog,
224 struct gl_shader *shader)
225 {
226 if (shader == NULL)
227 return true;
228
229 find_assignment_visitor find("gl_Position");
230 find.run(shader->ir);
231 if (!find.variable_found()) {
232 linker_error_printf(prog,
233 "vertex shader does not write to `gl_Position'\n");
234 return false;
235 }
236
237 return true;
238 }
239
240
241 /**
242 * Verify that a fragment shader executable meets all semantic requirements
243 *
244 * \param shader Fragment shader executable to be verified
245 */
246 bool
247 validate_fragment_shader_executable(struct gl_shader_program *prog,
248 struct gl_shader *shader)
249 {
250 if (shader == NULL)
251 return true;
252
253 find_assignment_visitor frag_color("gl_FragColor");
254 find_assignment_visitor frag_data("gl_FragData");
255
256 frag_color.run(shader->ir);
257 frag_data.run(shader->ir);
258
259 if (frag_color.variable_found() && frag_data.variable_found()) {
260 linker_error_printf(prog, "fragment shader writes to both "
261 "`gl_FragColor' and `gl_FragData'\n");
262 return false;
263 }
264
265 return true;
266 }
267
268
269 /**
270 * Generate a string describing the mode of a variable
271 */
272 static const char *
273 mode_string(const ir_variable *var)
274 {
275 switch (var->mode) {
276 case ir_var_auto:
277 return (var->read_only) ? "global constant" : "global variable";
278
279 case ir_var_uniform: return "uniform";
280 case ir_var_in: return "shader input";
281 case ir_var_out: return "shader output";
282 case ir_var_inout: return "shader inout";
283
284 case ir_var_temporary:
285 default:
286 assert(!"Should not get here.");
287 return "invalid variable";
288 }
289 }
290
291
292 /**
293 * Perform validation of global variables used across multiple shaders
294 */
295 bool
296 cross_validate_globals(struct gl_shader_program *prog,
297 struct gl_shader **shader_list,
298 unsigned num_shaders,
299 bool uniforms_only)
300 {
301 /* Examine all of the uniforms in all of the shaders and cross validate
302 * them.
303 */
304 glsl_symbol_table variables;
305 for (unsigned i = 0; i < num_shaders; i++) {
306 foreach_list(node, shader_list[i]->ir) {
307 ir_variable *const var = ((ir_instruction *) node)->as_variable();
308
309 if (var == NULL)
310 continue;
311
312 if (uniforms_only && (var->mode != ir_var_uniform))
313 continue;
314
315 /* Don't cross validate temporaries that are at global scope. These
316 * will eventually get pulled into the shader's 'main'.
317 */
318 if (var->mode == ir_var_temporary)
319 continue;
320
321 /* If a global with this name has already been seen, verify that the
322 * new instance has the same type. In addition, if the globals have
323 * initializers, the values of the initializers must be the same.
324 */
325 ir_variable *const existing = variables.get_variable(var->name);
326 if (existing != NULL) {
327 if (var->type != existing->type) {
328 linker_error_printf(prog, "%s `%s' declared as type "
329 "`%s' and type `%s'\n",
330 mode_string(var),
331 var->name, var->type->name,
332 existing->type->name);
333 return false;
334 }
335
336 /* FINISHME: Handle non-constant initializers.
337 */
338 if (var->constant_value != NULL) {
339 if (existing->constant_value != NULL) {
340 if (!var->constant_value->has_value(existing->constant_value)) {
341 linker_error_printf(prog, "initializers for %s "
342 "`%s' have differing values\n",
343 mode_string(var), var->name);
344 return false;
345 }
346 } else
347 /* If the first-seen instance of a particular uniform did not
348 * have an initializer but a later instance does, copy the
349 * initializer to the version stored in the symbol table.
350 */
351 /* FINISHME: This is wrong. The constant_value field should
352 * FINISHME: not be modified! Imagine a case where a shader
353 * FINISHME: without an initializer is linked in two different
354 * FINISHME: programs with shaders that have differing
355 * FINISHME: initializers. Linking with the first will
356 * FINISHME: modify the shader, and linking with the second
357 * FINISHME: will fail.
358 */
359 existing->constant_value =
360 var->constant_value->clone(talloc_parent(existing), NULL);
361 }
362 } else
363 variables.add_variable(var->name, var);
364 }
365 }
366
367 return true;
368 }
369
370
371 /**
372 * Perform validation of uniforms used across multiple shader stages
373 */
374 bool
375 cross_validate_uniforms(struct gl_shader_program *prog)
376 {
377 return cross_validate_globals(prog, prog->_LinkedShaders,
378 prog->_NumLinkedShaders, true);
379 }
380
381
382 /**
383 * Validate that outputs from one stage match inputs of another
384 */
385 bool
386 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
387 gl_shader *producer, gl_shader *consumer)
388 {
389 glsl_symbol_table parameters;
390 /* FINISHME: Figure these out dynamically. */
391 const char *const producer_stage = "vertex";
392 const char *const consumer_stage = "fragment";
393
394 /* Find all shader outputs in the "producer" stage.
395 */
396 foreach_list(node, producer->ir) {
397 ir_variable *const var = ((ir_instruction *) node)->as_variable();
398
399 /* FINISHME: For geometry shaders, this should also look for inout
400 * FINISHME: variables.
401 */
402 if ((var == NULL) || (var->mode != ir_var_out))
403 continue;
404
405 parameters.add_variable(var->name, var);
406 }
407
408
409 /* Find all shader inputs in the "consumer" stage. Any variables that have
410 * matching outputs already in the symbol table must have the same type and
411 * qualifiers.
412 */
413 foreach_list(node, consumer->ir) {
414 ir_variable *const input = ((ir_instruction *) node)->as_variable();
415
416 /* FINISHME: For geometry shaders, this should also look for inout
417 * FINISHME: variables.
418 */
419 if ((input == NULL) || (input->mode != ir_var_in))
420 continue;
421
422 ir_variable *const output = parameters.get_variable(input->name);
423 if (output != NULL) {
424 /* Check that the types match between stages.
425 */
426 if (input->type != output->type) {
427 linker_error_printf(prog,
428 "%s shader output `%s' delcared as "
429 "type `%s', but %s shader input declared "
430 "as type `%s'\n",
431 producer_stage, output->name,
432 output->type->name,
433 consumer_stage, input->type->name);
434 return false;
435 }
436
437 /* Check that all of the qualifiers match between stages.
438 */
439 if (input->centroid != output->centroid) {
440 linker_error_printf(prog,
441 "%s shader output `%s' %s centroid qualifier, "
442 "but %s shader input %s centroid qualifier\n",
443 producer_stage,
444 output->name,
445 (output->centroid) ? "has" : "lacks",
446 consumer_stage,
447 (input->centroid) ? "has" : "lacks");
448 return false;
449 }
450
451 if (input->invariant != output->invariant) {
452 linker_error_printf(prog,
453 "%s shader output `%s' %s invariant qualifier, "
454 "but %s shader input %s invariant qualifier\n",
455 producer_stage,
456 output->name,
457 (output->invariant) ? "has" : "lacks",
458 consumer_stage,
459 (input->invariant) ? "has" : "lacks");
460 return false;
461 }
462
463 if (input->interpolation != output->interpolation) {
464 linker_error_printf(prog,
465 "%s shader output `%s' specifies %s "
466 "interpolation qualifier, "
467 "but %s shader input specifies %s "
468 "interpolation qualifier\n",
469 producer_stage,
470 output->name,
471 output->interpolation_string(),
472 consumer_stage,
473 input->interpolation_string());
474 return false;
475 }
476 }
477 }
478
479 return true;
480 }
481
482
483 /**
484 * Populates a shader's symbol table with all global declarations
485 */
486 static void
487 populate_symbol_table(gl_shader *sh)
488 {
489 sh->symbols = new(sh) glsl_symbol_table;
490
491 foreach_list(node, sh->ir) {
492 ir_instruction *const inst = (ir_instruction *) node;
493 ir_variable *var;
494 ir_function *func;
495
496 if ((func = inst->as_function()) != NULL) {
497 sh->symbols->add_function(func->name, func);
498 } else if ((var = inst->as_variable()) != NULL) {
499 sh->symbols->add_variable(var->name, var);
500 }
501 }
502 }
503
504
505 /**
506 * Remap variables referenced in an instruction tree
507 *
508 * This is used when instruction trees are cloned from one shader and placed in
509 * another. These trees will contain references to \c ir_variable nodes that
510 * do not exist in the target shader. This function finds these \c ir_variable
511 * references and replaces the references with matching variables in the target
512 * shader.
513 *
514 * If there is no matching variable in the target shader, a clone of the
515 * \c ir_variable is made and added to the target shader. The new variable is
516 * added to \b both the instruction stream and the symbol table.
517 *
518 * \param inst IR tree that is to be processed.
519 * \param symbols Symbol table containing global scope symbols in the
520 * linked shader.
521 * \param instructions Instruction stream where new variable declarations
522 * should be added.
523 */
524 void
525 remap_variables(ir_instruction *inst, struct gl_shader *target,
526 hash_table *temps)
527 {
528 class remap_visitor : public ir_hierarchical_visitor {
529 public:
530 remap_visitor(struct gl_shader *target,
531 hash_table *temps)
532 {
533 this->target = target;
534 this->symbols = target->symbols;
535 this->instructions = target->ir;
536 this->temps = temps;
537 }
538
539 virtual ir_visitor_status visit(ir_dereference_variable *ir)
540 {
541 if (ir->var->mode == ir_var_temporary) {
542 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
543
544 assert(var != NULL);
545 ir->var = var;
546 return visit_continue;
547 }
548
549 ir_variable *const existing =
550 this->symbols->get_variable(ir->var->name);
551 if (existing != NULL)
552 ir->var = existing;
553 else {
554 ir_variable *copy = ir->var->clone(this->target, NULL);
555
556 this->symbols->add_variable(copy->name, copy);
557 this->instructions->push_head(copy);
558 ir->var = copy;
559 }
560
561 return visit_continue;
562 }
563
564 private:
565 struct gl_shader *target;
566 glsl_symbol_table *symbols;
567 exec_list *instructions;
568 hash_table *temps;
569 };
570
571 remap_visitor v(target, temps);
572
573 inst->accept(&v);
574 }
575
576
577 /**
578 * Move non-declarations from one instruction stream to another
579 *
580 * The intended usage pattern of this function is to pass the pointer to the
581 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
582 * pointer) for \c last and \c false for \c make_copies on the first
583 * call. Successive calls pass the return value of the previous call for
584 * \c last and \c true for \c make_copies.
585 *
586 * \param instructions Source instruction stream
587 * \param last Instruction after which new instructions should be
588 * inserted in the target instruction stream
589 * \param make_copies Flag selecting whether instructions in \c instructions
590 * should be copied (via \c ir_instruction::clone) into the
591 * target list or moved.
592 *
593 * \return
594 * The new "last" instruction in the target instruction stream. This pointer
595 * is suitable for use as the \c last parameter of a later call to this
596 * function.
597 */
598 exec_node *
599 move_non_declarations(exec_list *instructions, exec_node *last,
600 bool make_copies, gl_shader *target)
601 {
602 hash_table *temps = NULL;
603
604 if (make_copies)
605 temps = hash_table_ctor(0, hash_table_pointer_hash,
606 hash_table_pointer_compare);
607
608 foreach_list_safe(node, instructions) {
609 ir_instruction *inst = (ir_instruction *) node;
610
611 if (inst->as_function())
612 continue;
613
614 ir_variable *var = inst->as_variable();
615 if ((var != NULL) && (var->mode != ir_var_temporary))
616 continue;
617
618 assert(inst->as_assignment()
619 || ((var != NULL) && (var->mode == ir_var_temporary)));
620
621 if (make_copies) {
622 inst = inst->clone(target, NULL);
623
624 if (var != NULL)
625 hash_table_insert(temps, inst, var);
626 else
627 remap_variables(inst, target, temps);
628 } else {
629 inst->remove();
630 }
631
632 last->insert_after(inst);
633 last = inst;
634 }
635
636 if (make_copies)
637 hash_table_dtor(temps);
638
639 return last;
640 }
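/* Typical call pattern, mirroring the use in link_intrastage_shaders()
 * below: the first call passes the head sentinel of main's body and
 * make_copies = false; each later call passes the previous return value
 * and make_copies = true.
 *
 *    exec_node *last =
 *       move_non_declarations(linked->ir, (exec_node *) &main_sig->body,
 *                             false, linked);
 *
 *    for (unsigned i = 0; i < num_shaders; i++) {
 *       if (shader_list[i] == main)
 *          continue;
 *
 *       last = move_non_declarations(shader_list[i]->ir, last, true, linked);
 *    }
 */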
641
642 /**
643 * Get the function signature for main from a shader
644 */
645 static ir_function_signature *
646 get_main_function_signature(gl_shader *sh)
647 {
648 ir_function *const f = sh->symbols->get_function("main");
649 if (f != NULL) {
650 exec_list void_parameters;
651
652 /* Look for the 'void main()' signature and ensure that it's defined.
653 * This keeps the linker from accidentally picking a shader that just
654 * contains a prototype for main.
655 *
656 * We don't have to check for multiple definitions of main (in multiple
657 * shaders) because that would have already been caught above.
658 */
659 ir_function_signature *sig = f->matching_signature(&void_parameters);
660 if ((sig != NULL) && sig->is_defined) {
661 return sig;
662 }
663 }
664
665 return NULL;
666 }
667
668
669 /**
670 * Combine a group of shaders for a single stage to generate a linked shader
671 *
672 * \note
673 * If this function is supplied a single shader, it is cloned, and the new
674 * shader is returned.
675 */
676 static struct gl_shader *
677 link_intrastage_shaders(struct gl_shader_program *prog,
678 struct gl_shader **shader_list,
679 unsigned num_shaders)
680 {
681 /* Check that global variables defined in multiple shaders are consistent.
682 */
683 if (!cross_validate_globals(prog, shader_list, num_shaders, false))
684 return NULL;
685
686 /* Check that there is only a single definition of each function signature
687 * across all shaders.
688 */
689 for (unsigned i = 0; i < (num_shaders - 1); i++) {
690 foreach_list(node, shader_list[i]->ir) {
691 ir_function *const f = ((ir_instruction *) node)->as_function();
692
693 if (f == NULL)
694 continue;
695
696 for (unsigned j = i + 1; j < num_shaders; j++) {
697 ir_function *const other =
698 shader_list[j]->symbols->get_function(f->name);
699
700 /* If the other shader has no function (and therefore no function
701 * signatures) with the same name, skip to the next shader.
702 */
703 if (other == NULL)
704 continue;
705
706 foreach_iter (exec_list_iterator, iter, *f) {
707 ir_function_signature *sig =
708 (ir_function_signature *) iter.get();
709
710 if (!sig->is_defined || sig->is_built_in)
711 continue;
712
713 ir_function_signature *other_sig =
714 other->exact_matching_signature(& sig->parameters);
715
716 if ((other_sig != NULL) && other_sig->is_defined
717 && !other_sig->is_built_in) {
718 linker_error_printf(prog,
719 "function `%s' is multiply defined",
720 f->name);
721 return NULL;
722 }
723 }
724 }
725 }
726 }
727
728 /* Find the shader that defines main, and make a clone of it.
729 *
730 * Starting with the clone, search for undefined references. If one is
731 * found, find the shader that defines it. Clone the reference and add
732 * it to the shader. Repeat until there are no undefined references or
733 * until a reference cannot be resolved.
734 */
735 gl_shader *main = NULL;
736 for (unsigned i = 0; i < num_shaders; i++) {
737 if (get_main_function_signature(shader_list[i]) != NULL) {
738 main = shader_list[i];
739 break;
740 }
741 }
742
743 if (main == NULL) {
744 linker_error_printf(prog, "%s shader lacks `main'\n",
745 (shader_list[0]->Type == GL_VERTEX_SHADER)
746 ? "vertex" : "fragment");
747 return NULL;
748 }
749
750 gl_shader *const linked = _mesa_new_shader(NULL, 0, main->Type);
751 linked->ir = new(linked) exec_list;
752 clone_ir_list(linked, linked->ir, main->ir);
753
754 populate_symbol_table(linked);
755
756 /* A pointer to the main function in the final linked shader (i.e., the
757 * copy of the original shader that contained the main function).
758 */
759 ir_function_signature *const main_sig = get_main_function_signature(linked);
760
761 /* Move any instructions other than variable declarations or function
762 * declarations into main.
763 */
764 exec_node *insertion_point =
765 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
766 linked);
767
768 for (unsigned i = 0; i < num_shaders; i++) {
769 if (shader_list[i] == main)
770 continue;
771
772 insertion_point = move_non_declarations(shader_list[i]->ir,
773 insertion_point, true, linked);
774 }
775
776 /* Resolve initializers for global variables in the linked shader.
777 */
778 unsigned num_linking_shaders = num_shaders;
779 for (unsigned i = 0; i < num_shaders; i++)
780 num_linking_shaders += shader_list[i]->num_builtins_to_link;
781
782 gl_shader **linking_shaders =
783 (gl_shader **) calloc(num_linking_shaders, sizeof(gl_shader *));
784
785 memcpy(linking_shaders, shader_list,
786 sizeof(linking_shaders[0]) * num_shaders);
787
788 unsigned idx = num_shaders;
789 for (unsigned i = 0; i < num_shaders; i++) {
790 memcpy(&linking_shaders[idx], shader_list[i]->builtins_to_link,
791 sizeof(linking_shaders[0]) * shader_list[i]->num_builtins_to_link);
792 idx += shader_list[i]->num_builtins_to_link;
793 }
794
795 assert(idx == num_linking_shaders);
796
797 link_function_calls(prog, linked, linking_shaders, num_linking_shaders);
798
799 free(linking_shaders);
800
801 return linked;
802 }
803
804
805 struct uniform_node {
806 exec_node link;
807 struct gl_uniform *u;
808 unsigned slots;
809 };
810
811 void
812 assign_uniform_locations(struct gl_shader_program *prog)
813 {
814 /* */
815 exec_list uniforms;
816 unsigned total_uniforms = 0;
817 hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
818 hash_table_string_compare);
819
820 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
821 unsigned next_position = 0;
822
823 foreach_list(node, prog->_LinkedShaders[i]->ir) {
824 ir_variable *const var = ((ir_instruction *) node)->as_variable();
825
826 if ((var == NULL) || (var->mode != ir_var_uniform))
827 continue;
828
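/* Each uniform occupies one vec4 slot per four components, rounded up:
 * a float or vec4 takes one slot, a mat3 (nine components) takes three,
 * and a sampler reports zero component slots and is skipped below.
 */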
829 const unsigned vec4_slots = (var->component_slots() + 3) / 4;
830 if (vec4_slots == 0) {
831 /* If we've got a sampler or an aggregate of them, the size can
832 * end up zero. Don't allocate any space.
833 */
834 continue;
835 }
836
837 uniform_node *n = (uniform_node *) hash_table_find(ht, var->name);
838 if (n == NULL) {
839 n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
840 n->u = (gl_uniform *) calloc(vec4_slots, sizeof(struct gl_uniform));
841 n->slots = vec4_slots;
842
843 n->u[0].Name = strdup(var->name);
844 for (unsigned j = 1; j < vec4_slots; j++)
845 n->u[j].Name = n->u[0].Name;
846
847 hash_table_insert(ht, n, n->u[0].Name);
848 uniforms.push_tail(& n->link);
849 total_uniforms += vec4_slots;
850 }
851
852 if (var->constant_value != NULL)
853 for (unsigned j = 0; j < vec4_slots; j++)
854 n->u[j].Initialized = true;
855
856 var->location = next_position;
857
858 for (unsigned j = 0; j < vec4_slots; j++) {
859 switch (prog->_LinkedShaders[i]->Type) {
860 case GL_VERTEX_SHADER:
861 n->u[j].VertPos = next_position;
862 break;
863 case GL_FRAGMENT_SHADER:
864 n->u[j].FragPos = next_position;
865 break;
866 case GL_GEOMETRY_SHADER:
867 /* FINISHME: Support geometry shaders. */
868 assert(prog->_LinkedShaders[i]->Type != GL_GEOMETRY_SHADER);
869 break;
870 }
871
872 next_position++;
873 }
874 }
875 }
876
877 gl_uniform_list *ul = (gl_uniform_list *)
878 calloc(1, sizeof(gl_uniform_list));
879
880 ul->Size = total_uniforms;
881 ul->NumUniforms = total_uniforms;
882 ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));
883
884 unsigned idx = 0;
885 uniform_node *next;
886 for (uniform_node *node = (uniform_node *) uniforms.head
887 ; node->link.next != NULL
888 ; node = next) {
889 next = (uniform_node *) node->link.next;
890
891 node->link.remove();
892 memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform) * node->slots);
893 idx += node->slots;
894
895 free(node->u);
896 free(node);
897 }
898
899 hash_table_dtor(ht);
900
901 prog->Uniforms = ul;
902 }
903
904
905 /**
906 * Find a contiguous set of available bits in a bitmask
907 *
908 * \param used_mask Bits representing used (1) and unused (0) locations
909 * \param needed_count Number of contiguous bits needed.
910 *
911 * \return
912 * Base location of the available bits on success or -1 on failure.
913 */
914 int
915 find_available_slots(unsigned used_mask, unsigned needed_count)
916 {
917 unsigned needed_mask = (1 << needed_count) - 1;
918 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
919
920 /* The comparison to 32 is redundant, but without it GCC emits "warning:
921 * cannot optimize possibly infinite loops" for the loop below.
922 */
923 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
924 return -1;
925
926 for (int i = 0; i <= max_bit_to_test; i++) {
927 if ((needed_mask & ~used_mask) == needed_mask)
928 return i;
929
930 needed_mask <<= 1;
931 }
932
933 return -1;
934 }
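/* For example, with used_mask = 0x0b (locations 0, 1, and 3 already in use)
 * and needed_count = 2, location 2 is free but location 3 is not, so the
 * first pair of contiguous free locations starts at 4 and the function
 * returns 4.
 */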
935
936
937 bool
938 assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
939 {
940 /* Mark invalid attribute locations as being used.
941 */
942 unsigned used_locations = (max_attribute_index >= 32)
943 ? ~0 : ~((1 << max_attribute_index) - 1);
944
945 gl_shader *const sh = prog->_LinkedShaders[0];
946 assert(sh->Type == GL_VERTEX_SHADER);
947
948 /* Operate in a total of four passes.
949 *
950 * 1. Invalidate the location assignments for all vertex shader inputs.
951 *
952 * 2. Assign locations for inputs that have user-defined (via
953 * glBindAttribLocation) locations.
954 *
955 * 3. Sort the attributes without assigned locations by number of slots
956 * required in decreasing order. Fragmentation caused by attribute
957 * locations assigned by the application may prevent large attributes
958 * from having enough contiguous space.
959 *
960 * 4. Assign locations to any inputs without assigned locations.
961 */
962
963 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
964
965 if (prog->Attributes != NULL) {
966 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
967 ir_variable *const var =
968 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
969
970 /* Note: attributes that occupy multiple slots, such as arrays or
971 * matrices, may appear in the attrib array multiple times.
972 */
973 if ((var == NULL) || (var->location != -1))
974 continue;
975
976 /* From page 61 of the OpenGL 4.0 spec:
977 *
978 * "LinkProgram will fail if the attribute bindings assigned by
979 * BindAttribLocation do not leave enough space to assign a
980 * location for an active matrix attribute or an active attribute
981 * array, both of which require multiple contiguous generic
982 * attributes."
983 *
984 * Previous versions of the spec contain similar language but omit the
985 * bit about attribute arrays.
986 *
987 * Page 61 of the OpenGL 4.0 spec also says:
988 *
989 * "It is possible for an application to bind more than one
990 * attribute name to the same location. This is referred to as
991 * aliasing. This will only work if only one of the aliased
992 * attributes is active in the executable program, or if no path
993 * through the shader consumes more than one attribute of a set
994 * of attributes aliased to the same location. A link error can
995 * occur if the linker determines that every path through the
996 * shader consumes multiple aliased attributes, but
997 * implementations are not required to generate an error in this
998 * case."
999 *
1000 * These two paragraphs are either somewhat contradictory, or I don't
1001 * fully understand one or both of them.
1002 */
1003 /* FINISHME: The code as currently written does not support attribute
1004 * FINISHME: location aliasing (see comment above).
1005 */
1006 const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
1007 const unsigned slots = count_attribute_slots(var->type);
1008
1009 /* Mask representing the contiguous slots that will be used by this
1010 * attribute.
1011 */
1012 const unsigned use_mask = (1 << slots) - 1;
1013
1014 /* Generate a link error if the set of bits requested for this
1015 * attribute overlaps any previously allocated bits.
1016 */
1017 if ((~(use_mask << attr) & used_locations) != used_locations) {
1018 linker_error_printf(prog,
1019 "insufficient contiguous attribute locations "
1020 "available for vertex shader input `%s'",
1021 var->name);
1022 return false;
1023 }
1024
1025 var->location = VERT_ATTRIB_GENERIC0 + attr;
1026 used_locations |= (use_mask << attr);
1027 }
1028 }
1029
1030 /* Temporary storage for the set of attributes that need locations assigned.
1031 */
1032 struct temp_attr {
1033 unsigned slots;
1034 ir_variable *var;
1035
1036 /* Used below in the call to qsort. */
1037 static int compare(const void *a, const void *b)
1038 {
1039 const temp_attr *const l = (const temp_attr *) a;
1040 const temp_attr *const r = (const temp_attr *) b;
1041
1042 /* Reversed because we want a descending order sort below. */
1043 return r->slots - l->slots;
1044 }
1045 } to_assign[16];
1046
1047 unsigned num_attr = 0;
1048
1049 foreach_list(node, sh->ir) {
1050 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1051
1052 if ((var == NULL) || (var->mode != ir_var_in))
1053 continue;
1054
1055 /* The location was explicitly assigned, nothing to do here.
1056 */
1057 if (var->location != -1)
1058 continue;
1059
1060 to_assign[num_attr].slots = count_attribute_slots(var->type);
1061 to_assign[num_attr].var = var;
1062 num_attr++;
1063 }
1064
1065 /* If all of the attributes were assigned locations by the application (or
1066 * are built-in attributes with fixed locations), return early. This should
1067 * be the common case.
1068 */
1069 if (num_attr == 0)
1070 return true;
1071
1072 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
1073
1074 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can only
1075 * be assigned explicitly via glBindAttribLocation. Mark it as reserved
1076 * to prevent it from being automatically allocated below.
1077 */
1078 find_deref_visitor find("gl_Vertex");
1079 find.run(sh->ir);
1080 if (find.variable_found())
1081 used_locations |= (1 << 0);
1082
1083 for (unsigned i = 0; i < num_attr; i++) {
1084 /* Mask representing the contiguous slots that will be used by this
1085 * attribute.
1086 */
1087 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
1088
1089 int location = find_available_slots(used_locations, to_assign[i].slots);
1090
1091 if (location < 0) {
1092 linker_error_printf(prog,
1093 "insufficient contiguous attribute locations "
1094 "available for vertex shader input `%s'",
1095 to_assign[i].var->name);
1096 return false;
1097 }
1098
1099 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
1100 used_locations |= (use_mask << location);
1101 }
1102
1103 return true;
1104 }
1105
1106
1107 /**
1108 * Demote shader outputs that are not read to being just plain global variables
1109 */
1110 void
1111 demote_unread_shader_outputs(gl_shader *sh)
1112 {
1113 foreach_list(node, sh->ir) {
1114 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1115
1116 if ((var == NULL) || (var->mode != ir_var_out))
1117 continue;
1118
1119 /* An 'out' variable is only really a shader output if its value is read
1120 * by the following stage.
1121 */
1122 if (var->location == -1) {
1123 var->mode = ir_var_auto;
1124 }
1125 }
1126 }
1127
1128
1129 void
1130 assign_varying_locations(struct gl_shader_program *prog,
1131 gl_shader *producer, gl_shader *consumer)
1132 {
1133 /* FINISHME: Set dynamically when geometry shader support is added. */
1134 unsigned output_index = VERT_RESULT_VAR0;
1135 unsigned input_index = FRAG_ATTRIB_VAR0;
1136
1137 /* Operate in a total of three passes.
1138 *
1139 * 1. Assign locations for any matching inputs and outputs.
1140 *
1141 * 2. Mark output variables in the producer that do not have locations as
1142 * not being outputs. This lets the optimizer eliminate them.
1143 *
1144 * 3. Mark input variables in the consumer that do not have locations as
1145 * not being inputs. This lets the optimizer eliminate them.
1146 */
1147
1148 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
1149 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
1150
1151 foreach_list(node, producer->ir) {
1152 ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
1153
1154 if ((output_var == NULL) || (output_var->mode != ir_var_out)
1155 || (output_var->location != -1))
1156 continue;
1157
1158 ir_variable *const input_var =
1159 consumer->symbols->get_variable(output_var->name);
1160
1161 if ((input_var == NULL) || (input_var->mode != ir_var_in))
1162 continue;
1163
1164 assert(input_var->location == -1);
1165
1166 /* FINISHME: Location assignment will need some changes when arrays,
1167 * FINISHME: matrices, and structures are allowed as shader inputs /
1168 * FINISHME: outputs.
1169 */
1170 output_var->location = output_index;
1171 input_var->location = input_index;
1172
1173 output_index++;
1174 input_index++;
1175 }
1176
1177 demote_unread_shader_outputs(producer);
1178
1179 foreach_list(node, consumer->ir) {
1180 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1181
1182 if ((var == NULL) || (var->mode != ir_var_in))
1183 continue;
1184
1185 if (var->location == -1) {
1186 if (prog->Version <= 120) {
1187 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
1188 *
1189 * Only those varying variables used (i.e. read) in
1190 * the fragment shader executable must be written to
1191 * by the vertex shader executable; declaring
1192 * superfluous varying variables in a vertex shader is
1193 * permissible.
1194 *
1195 * We interpret this text as meaning that the VS must
1196 * write the variable for the FS to read it. See
1197 * "glsl1-varying read but not written" in piglit.
1198 */
1199
1200 linker_error_printf(prog, "fragment shader varying %s not written "
1201 "by vertex shader\n.", var->name);
1202 prog->LinkStatus = false;
1203 }
1204
1205 /* An 'in' variable is only really a shader input if its
1206 * value is written by the previous stage.
1207 */
1208 var->mode = ir_var_auto;
1209 }
1210 }
1211 }
1212
1213
1214 void
1215 link_shaders(struct gl_shader_program *prog)
1216 {
1217 prog->LinkStatus = false;
1218 prog->Validated = false;
1219 prog->_Used = false;
1220
1221 if (prog->InfoLog != NULL)
1222 talloc_free(prog->InfoLog);
1223
1224 prog->InfoLog = talloc_strdup(NULL, "");
1225
1226 /* Separate the shaders into groups based on their type.
1227 */
1228 struct gl_shader **vert_shader_list;
1229 unsigned num_vert_shaders = 0;
1230 struct gl_shader **frag_shader_list;
1231 unsigned num_frag_shaders = 0;
1232
1233 vert_shader_list = (struct gl_shader **)
1234 calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
1235 frag_shader_list = &vert_shader_list[prog->NumShaders];
1236
1237 unsigned min_version = UINT_MAX;
1238 unsigned max_version = 0;
1239 for (unsigned i = 0; i < prog->NumShaders; i++) {
1240 min_version = MIN2(min_version, prog->Shaders[i]->Version);
1241 max_version = MAX2(max_version, prog->Shaders[i]->Version);
1242
1243 switch (prog->Shaders[i]->Type) {
1244 case GL_VERTEX_SHADER:
1245 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
1246 num_vert_shaders++;
1247 break;
1248 case GL_FRAGMENT_SHADER:
1249 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
1250 num_frag_shaders++;
1251 break;
1252 case GL_GEOMETRY_SHADER:
1253 /* FINISHME: Support geometry shaders. */
1254 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
1255 break;
1256 }
1257 }
1258
1259 /* Prior to GLSL 1.30, different compilation units could mix and
1260 * match shading language versions. With GLSL 1.30 and later, the versions
1261 * of all shaders must match.
1262 */
1263 assert(min_version >= 110);
1264 assert(max_version <= 130);
1265 if ((max_version >= 130) && (min_version != max_version)) {
1266 linker_error_printf(prog, "all shaders must use same shading "
1267 "language version\n");
1268 goto done;
1269 }
1270
1271 prog->Version = max_version;
1272
1273 /* Link all shaders for a particular stage and validate the result.
1274 */
1275 prog->_NumLinkedShaders = 0;
1276 if (num_vert_shaders > 0) {
1277 gl_shader *const sh =
1278 link_intrastage_shaders(prog, vert_shader_list, num_vert_shaders);
1279
1280 if (sh == NULL)
1281 goto done;
1282
1283 if (!validate_vertex_shader_executable(prog, sh))
1284 goto done;
1285
1286 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1287 prog->_NumLinkedShaders++;
1288 }
1289
1290 if (num_frag_shaders > 0) {
1291 gl_shader *const sh =
1292 link_intrastage_shaders(prog, frag_shader_list, num_frag_shaders);
1293
1294 if (sh == NULL)
1295 goto done;
1296
1297 if (!validate_fragment_shader_executable(prog, sh))
1298 goto done;
1299
1300 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1301 prog->_NumLinkedShaders++;
1302 }
1303
1304 /* Here begins the inter-stage linking phase. Some initial validation is
1305 * performed, then locations are assigned for uniforms, attributes, and
1306 * varyings.
1307 */
1308 if (cross_validate_uniforms(prog)) {
1309 /* Validate the inputs of each stage with the output of the preceding
1310 * stage.
1311 */
1312 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) {
1313 if (!cross_validate_outputs_to_inputs(prog,
1314 prog->_LinkedShaders[i - 1],
1315 prog->_LinkedShaders[i]))
1316 goto done;
1317 }
1318
1319 prog->LinkStatus = true;
1320 }
1321
1322 /* Do common optimization before assigning storage for attributes,
1323 * uniforms, and varyings. Later optimization could possibly make
1324 * some of that unused.
1325 */
1326 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
1327 while (do_common_optimization(prog->_LinkedShaders[i]->ir, true))
1328 ;
1329 }
1330
1331 assign_uniform_locations(prog);
1332
1333 if (prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER) {
1334 /* FINISHME: The value of the max_attribute_index parameter is
1335 * FINISHME: implementation dependent based on the value of
1336 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
1337 * FINISHME: at least 16, so hardcode 16 for now.
1338 */
1339 if (!assign_attribute_locations(prog, 16))
1340 goto done;
1341
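/* When the program contains only a vertex shader there is no consumer
 * stage, so assign_varying_locations() below is never run. Demote the
 * unread user-defined varyings (those still at location -1) to plain
 * globals here so the optimizer can eliminate them.
 */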
1342 if (prog->_NumLinkedShaders == 1)
1343 demote_unread_shader_outputs(prog->_LinkedShaders[0]);
1344 }
1345
1346 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++)
1347 assign_varying_locations(prog,
1348 prog->_LinkedShaders[i - 1],
1349 prog->_LinkedShaders[i]);
1350
1351 /* FINISHME: Assign fragment shader output locations. */
1352
1353 done:
1354 free(vert_shader_list);
1355 }