glsl2: When linking makes a variable not a varying output, make it ir_var_auto.
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35 * - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66 #include <cstdlib>
67 #include <cstdio>
68 #include <cstdarg>
69
70 extern "C" {
71 #include <talloc.h>
72 }
73
74 #include "main/mtypes.h"
75 #include "glsl_symbol_table.h"
76 #include "glsl_parser_extras.h"
77 #include "ir.h"
78 #include "ir_optimization.h"
79 #include "program.h"
80 #include "hash_table.h"
81 #include "shader_api.h"
82
83 /**
84 * Visitor that determines whether or not a variable is ever written.
85 */
86 class find_assignment_visitor : public ir_hierarchical_visitor {
87 public:
88 find_assignment_visitor(const char *name)
89 : name(name), found(false)
90 {
91 /* empty */
92 }
93
94 virtual ir_visitor_status visit_enter(ir_assignment *ir)
95 {
96 ir_variable *const var = ir->lhs->variable_referenced();
97
98 if (strcmp(name, var->name) == 0) {
99 found = true;
100 return visit_stop;
101 }
102
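/* Only the LHS of an assignment can write to a variable; the RHS contains
 * nothing but reads, so there is no need to descend into this subtree.
 */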
103 return visit_continue_with_parent;
104 }
105
106 bool variable_found()
107 {
108 return found;
109 }
110
111 private:
112 const char *name; /**< Find writes to a variable with this name. */
113 bool found; /**< Was a write to the variable found? */
114 };
115
116
117 void
118 linker_error_printf(gl_shader_program *prog, const char *fmt, ...)
119 {
120 va_list ap;
121
122 prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: ");
123 va_start(ap, fmt);
124 prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap);
125 va_end(ap);
126 }
127
128
129 void
130 invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode,
131 int generic_base)
132 {
133 foreach_list(node, sh->ir) {
134 ir_variable *const var = ((ir_instruction *) node)->as_variable();
135
136 if ((var == NULL) || (var->mode != (unsigned) mode))
137 continue;
138
139 /* Only assign locations for generic attributes / varyings / etc.
140 */
141 if (var->location >= generic_base)
142 var->location = -1;
143 }
144 }
145
146
147 /**
148 * Determine the number of attribute slots required for a particular type
149 *
150 * This code is here because it implements the language rules of a specific
151 * GLSL version. Since it's a property of the language and not a property of
152 * types in general, it doesn't really belong in glsl_type.
153 */
154 unsigned
155 count_attribute_slots(const glsl_type *t)
156 {
157 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
158 *
159 * "A scalar input counts the same amount against this limit as a vec4,
160 * so applications may want to consider packing groups of four
161 * unrelated float inputs together into a vector to better utilize the
162 * capabilities of the underlying hardware. A matrix input will use up
163 * multiple locations. The number of locations used will equal the
164 * number of columns in the matrix."
165 *
166 * The spec does not explicitly say how arrays are counted. However, it
167 * should be safe to assume the total number of slots consumed by an array
168 * is the number of entries in the array multiplied by the number of slots
169 * consumed by a single element of the array.
170 */
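/* For example, under these rules a `float' or `vec4' input consumes one
 * slot, a `mat4' consumes 4 slots (one per column), and a `mat4[3]' array
 * consumes 3 * 4 = 12 slots.
 */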
171
172 if (t->is_array())
173 return t->array_size() * count_attribute_slots(t->element_type());
174
175 if (t->is_matrix())
176 return t->matrix_columns;
177
178 return 1;
179 }
180
181
182 /**
183 * Verify that a vertex shader executable meets all semantic requirements
184 *
185 * \param shader Vertex shader executable to be verified
186 */
187 bool
188 validate_vertex_shader_executable(struct gl_shader_program *prog,
189 struct gl_shader *shader)
190 {
191 if (shader == NULL)
192 return true;
193
194 if (!shader->symbols->get_function("main")) {
195 linker_error_printf(prog, "vertex shader lacks `main'\n");
196 return false;
197 }
198
199 find_assignment_visitor find("gl_Position");
200 find.run(shader->ir);
201 if (!find.variable_found()) {
202 linker_error_printf(prog,
203 "vertex shader does not write to `gl_Position'\n");
204 return false;
205 }
206
207 return true;
208 }
209
210
211 /**
212 * Verify that a fragment shader executable meets all semantic requirements
213 *
214 * \param shader Fragment shader executable to be verified
215 */
216 bool
217 validate_fragment_shader_executable(struct gl_shader_program *prog,
218 struct gl_shader *shader)
219 {
220 if (shader == NULL)
221 return true;
222
223 if (!shader->symbols->get_function("main")) {
224 linker_error_printf(prog, "fragment shader lacks `main'\n");
225 return false;
226 }
227
228 find_assignment_visitor frag_color("gl_FragColor");
229 find_assignment_visitor frag_data("gl_FragData");
230
231 frag_color.run(shader->ir);
232 frag_data.run(shader->ir);
233
234 if (frag_color.variable_found() && frag_data.variable_found()) {
235 linker_error_printf(prog, "fragment shader writes to both "
236 "`gl_FragColor' and `gl_FragData'\n");
237 return false;
238 }
239
240 return true;
241 }
242
243
244 /**
245 * Generate a string describing the mode of a variable
246 */
247 static const char *
248 mode_string(const ir_variable *var)
249 {
250 switch (var->mode) {
251 case ir_var_auto:
252 return (var->read_only) ? "global constant" : "global variable";
253
254 case ir_var_uniform: return "uniform";
255 case ir_var_in: return "shader input";
256 case ir_var_out: return "shader output";
257 case ir_var_inout: return "shader inout";
258 default:
259 assert(!"Should not get here.");
260 return "invalid variable";
261 }
262 }
263
264
265 /**
266 * Perform validation of global variables used across multiple shaders
267 */
268 bool
269 cross_validate_globals(struct gl_shader_program *prog,
270 struct gl_shader **shader_list,
271 unsigned num_shaders,
272 bool uniforms_only)
273 {
274 /* Examine all of the uniforms in all of the shaders and cross validate
275 * them.
276 */
277 glsl_symbol_table variables;
278 for (unsigned i = 0; i < num_shaders; i++) {
279 foreach_list(node, shader_list[i]->ir) {
280 ir_variable *const var = ((ir_instruction *) node)->as_variable();
281
282 if (var == NULL)
283 continue;
284
285 if (uniforms_only && (var->mode != ir_var_uniform))
286 continue;
287
288 /* If a global with this name has already been seen, verify that the
289 * new instance has the same type. In addition, if the globals have
290 * initializers, the values of the initializers must be the same.
291 */
292 ir_variable *const existing = variables.get_variable(var->name);
293 if (existing != NULL) {
294 if (var->type != existing->type) {
295 linker_error_printf(prog, "%s `%s' declared as type "
296 "`%s' and type `%s'\n",
297 mode_string(var),
298 var->name, var->type->name,
299 existing->type->name);
300 return false;
301 }
302
303 /* FINISHME: Handle non-constant initializers.
304 */
305 if (var->constant_value != NULL) {
306 if (existing->constant_value != NULL) {
307 if (!var->constant_value->has_value(existing->constant_value)) {
308 linker_error_printf(prog, "initializers for %s "
309 "`%s' have differing values\n",
310 mode_string(var), var->name);
311 return false;
312 }
313 } else
314 /* If the first-seen instance of a particular uniform did not
315 * have an initializer but a later instance does, copy the
316 * initializer to the version stored in the symbol table.
317 */
318 existing->constant_value = var->constant_value->clone(NULL);
319 }
320 } else
321 variables.add_variable(var->name, var);
322 }
323 }
324
325 return true;
326 }
327
328
329 /**
330 * Perform validation of uniforms used across multiple shader stages
331 */
332 bool
333 cross_validate_uniforms(struct gl_shader_program *prog)
334 {
335 return cross_validate_globals(prog, prog->_LinkedShaders,
336 prog->_NumLinkedShaders, true);
337 }
338
339
340 /**
341 * Validate that outputs from one stage match inputs of another
342 */
343 bool
344 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
345 gl_shader *producer, gl_shader *consumer)
346 {
347 glsl_symbol_table parameters;
348 /* FINISHME: Figure these out dynamically. */
349 const char *const producer_stage = "vertex";
350 const char *const consumer_stage = "fragment";
351
352 /* Find all shader outputs in the "producer" stage.
353 */
354 foreach_list(node, producer->ir) {
355 ir_variable *const var = ((ir_instruction *) node)->as_variable();
356
357 /* FINISHME: For geometry shaders, this should also look for inout
358 * FINISHME: variables.
359 */
360 if ((var == NULL) || (var->mode != ir_var_out))
361 continue;
362
363 parameters.add_variable(var->name, var);
364 }
365
366
367 /* Find all shader inputs in the "consumer" stage. Any variables that have
368 * matching outputs already in the symbol table must have the same type and
369 * qualifiers.
370 */
371 foreach_list(node, consumer->ir) {
372 ir_variable *const input = ((ir_instruction *) node)->as_variable();
373
374 /* FINISHME: For geometry shaders, this should also look for inout
375 * FINISHME: variables.
376 */
377 if ((input == NULL) || (input->mode != ir_var_in))
378 continue;
379
380 ir_variable *const output = parameters.get_variable(input->name);
381 if (output != NULL) {
382 /* Check that the types match between stages.
383 */
384 if (input->type != output->type) {
385 linker_error_printf(prog,
386 "%s shader output `%s' declared as "
387 "type `%s', but %s shader input declared "
388 "as type `%s'\n",
389 producer_stage, output->name,
390 output->type->name,
391 consumer_stage, input->type->name);
392 return false;
393 }
394
395 /* Check that all of the qualifiers match between stages.
396 */
397 if (input->centroid != output->centroid) {
398 linker_error_printf(prog,
399 "%s shader output `%s' %s centroid qualifier, "
400 "but %s shader input %s centroid qualifier\n",
401 producer_stage,
402 output->name,
403 (output->centroid) ? "has" : "lacks",
404 consumer_stage,
405 (input->centroid) ? "has" : "lacks");
406 return false;
407 }
408
409 if (input->invariant != output->invariant) {
410 linker_error_printf(prog,
411 "%s shader output `%s' %s invariant qualifier, "
412 "but %s shader input %s invariant qualifier\n",
413 producer_stage,
414 output->name,
415 (output->invariant) ? "has" : "lacks",
416 consumer_stage,
417 (input->invariant) ? "has" : "lacks");
418 return false;
419 }
420
421 if (input->interpolation != output->interpolation) {
422 linker_error_printf(prog,
423 "%s shader output `%s' specifies %s "
424 "interpolation qualifier, "
425 "but %s shader input specifies %s "
426 "interpolation qualifier\n",
427 producer_stage,
428 output->name,
429 output->interpolation_string(),
430 consumer_stage,
431 input->interpolation_string());
432 return false;
433 }
434 }
435 }
436
437 return true;
438 }
439
440
441 /**
442 * Populates a shader's symbol table with all global declarations
443 */
444 static void
445 populate_symbol_table(gl_shader *sh)
446 {
447 sh->symbols = new(sh) glsl_symbol_table;
448
449 foreach_list(node, sh->ir) {
450 ir_instruction *const inst = (ir_instruction *) node;
451 ir_variable *var;
452 ir_function *func;
453
454 if ((func = inst->as_function()) != NULL) {
455 sh->symbols->add_function(func->name, func);
456 } else if ((var = inst->as_variable()) != NULL) {
457 sh->symbols->add_variable(var->name, var);
458 }
459 }
460 }
461
462
463 /**
464 * Remap variables referenced in an instruction tree
465 *
466 * This is used when instruction trees are cloned from one shader and placed in
467 * another. These trees will contain references to \c ir_variable nodes that
468 * do not exist in the target shader. This function finds these \c ir_variable
469 * references and replaces the references with matching variables in the target
470 * shader.
471 *
472 * If there is no matching variable in the target shader, a clone of the
473 * \c ir_variable is made and added to the target shader. The new variable is
474 * added to \b both the instruction stream and the symbol table.
475 *
476 * \param inst IR tree that is to be processed.
477 * \param symbols Symbol table containing global scope symbols in the
478 * linked shader.
479 * \param instructions Instruction stream where new variable declarations
480 * should be added.
481 */
482 void
483 remap_variables(ir_instruction *inst, glsl_symbol_table *symbols,
484 exec_list *instructions)
485 {
486 class remap_visitor : public ir_hierarchical_visitor {
487 public:
488 remap_visitor(glsl_symbol_table *symbols, exec_list *instructions)
489 {
490 this->symbols = symbols;
491 this->instructions = instructions;
492 }
493
494 virtual ir_visitor_status visit(ir_dereference_variable *ir)
495 {
496 ir_variable *const existing =
497 this->symbols->get_variable(ir->var->name);
498 if (existing != NULL)
499 ir->var = existing;
500 else {
501 ir_variable *copy = ir->var->clone(NULL);
502
503 this->symbols->add_variable(copy->name, copy);
504 this->instructions->push_head(copy);
505 }
506
507 return visit_continue;
508 }
509
510 private:
511 glsl_symbol_table *symbols;
512 exec_list *instructions;
513 };
514
515 remap_visitor v(symbols, instructions);
516
517 inst->accept(&v);
518 }
519
520
521 /**
522 * Move non-declarations from one instruction stream to another
523 *
524 * The intended usage pattern of this function is to pass the pointer to the
525 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
526 * pointer) for \c last and \c false for \c make_copies on the first
527 * call. Successive calls pass the return value of the previous call for
528 * \c last and \c true for \c make_copies.
529 *
530 * \param instructions Source instruction stream
531 * \param last Instruction after which new instructions should be
532 * inserted in the target instruction stream
533 * \param make_copies Flag selecting whether instructions in \c instructions
534 * should be copied (via \c ir_instruction::clone) into the
535 * target list or moved.
536 *
537 * \return
538 * The new "last" instruction in the target instruction stream. This pointer
539 * is suitable for use as the \c last parameter of a later call to this
540 * function.
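*
* A minimal sketch of the intended call pattern (the shader names below are
* illustrative only):
*
*    exec_node *last = (exec_node *) &main_sig->body;
*    last = move_non_declarations(first_shader->ir, last, false, linked);
*    last = move_non_declarations(second_shader->ir, last, true, linked);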
541 */
542 exec_node *
543 move_non_declarations(exec_list *instructions, exec_node *last,
544 bool make_copies, gl_shader *target)
545 {
546 foreach_list(node, instructions) {
547 ir_instruction *inst = (ir_instruction *) node;
548
549 if (inst->as_variable() || inst->as_function())
550 continue;
551
552 assert(inst->as_assignment());
553
554 if (make_copies) {
555 inst = inst->clone(NULL);
556 remap_variables(inst, target->symbols, target->ir);
557 } else {
558 inst->remove();
559 }
560
561 last->insert_after(inst);
562 last = inst;
563 }
564
565 return last;
566 }
567
568 /**
569 * Get the function signature for main from a shader
570 */
571 static ir_function_signature *
572 get_main_function_signature(gl_shader *sh)
573 {
574 ir_function *const f = sh->symbols->get_function("main");
575 if (f != NULL) {
576 exec_list void_parameters;
577
578 /* Look for the 'void main()' signature and ensure that it's defined.
579 * This keeps the linker from accidentally picking a shader that just
580 * contains a prototype for main.
581 *
582 * We don't have to check for multiple definitions of main (in multiple
583 * shaders) because that would have already been caught above.
584 */
585 ir_function_signature *sig = f->matching_signature(&void_parameters);
586 if ((sig != NULL) && sig->is_defined) {
587 return sig;
588 }
589 }
590
591 return NULL;
592 }
593
594
595 /**
596 * Combine a group of shaders for a single stage to generate a linked shader
597 *
598 * \note
599 * If this function is supplied a single shader, it is cloned, and the new
600 * shader is returned.
601 */
602 static struct gl_shader *
603 link_intrastage_shaders(struct gl_shader_program *prog,
604 struct gl_shader **shader_list,
605 unsigned num_shaders)
606 {
607 /* Check that global variables defined in multiple shaders are consistent.
608 */
609 if (!cross_validate_globals(prog, shader_list, num_shaders, false))
610 return NULL;
611
612 /* Check that there is only a single definition of each function signature
613 * across all shaders.
614 */
615 for (unsigned i = 0; i < (num_shaders - 1); i++) {
616 foreach_list(node, shader_list[i]->ir) {
617 ir_function *const f = ((ir_instruction *) node)->as_function();
618
619 if (f == NULL)
620 continue;
621
622 for (unsigned j = i + 1; j < num_shaders; j++) {
623 ir_function *const other =
624 shader_list[j]->symbols->get_function(f->name);
625
626 /* If the other shader has no function (and therefore no function
627 * signatures) with the same name, skip to the next shader.
628 */
629 if (other == NULL)
630 continue;
631
632 foreach_iter (exec_list_iterator, iter, *f) {
633 ir_function_signature *sig =
634 (ir_function_signature *) iter.get();
635
636 if (!sig->is_defined || sig->is_built_in)
637 continue;
638
639 ir_function_signature *other_sig =
640 other->exact_matching_signature(& sig->parameters);
641
642 if ((other_sig != NULL) && other_sig->is_defined
643 && !other_sig->is_built_in) {
644 linker_error_printf(prog,
645 "function `%s' is multiply defined",
646 f->name);
647 return NULL;
648 }
649 }
650 }
651 }
652 }
653
654 /* Find the shader that defines main, and make a clone of it.
655 *
656 * Starting with the clone, search for undefined references. If one is
657 * found, find the shader that defines it. Clone the reference and add
658 * it to the shader. Repeat until there are no undefined references or
659 * until a reference cannot be resolved.
660 */
661 gl_shader *main = NULL;
662 for (unsigned i = 0; i < num_shaders; i++) {
663 if (get_main_function_signature(shader_list[i]) != NULL) {
664 main = shader_list[i];
665 break;
666 }
667 }
668
669 if (main == NULL) {
670 linker_error_printf(prog, "%s shader lacks `main'\n",
671 (shader_list[0]->Type == GL_VERTEX_SHADER)
672 ? "vertex" : "fragment");
673 return NULL;
674 }
675
676 gl_shader *const linked = _mesa_new_shader(NULL, 0, main->Type);
677 linked->ir = new(linked) exec_list;
678 clone_ir_list(linked->ir, main->ir);
679
680 populate_symbol_table(linked);
681
682 * Get a pointer to the main function in the final linked shader (i.e., the
683 * copy of the original shader that contained the main function).
684 */
685 ir_function_signature *const main_sig = get_main_function_signature(linked);
686
687 /* Move any instructions other than variable declarations or function
688 * declarations into main.
689 */
690 exec_node *insertion_point = (exec_node *) &main_sig->body;
691 for (unsigned i = 0; i < num_shaders; i++) {
692 insertion_point = move_non_declarations(shader_list[i]->ir,
693 insertion_point,
694 (shader_list[i] != main),
695 linked);
696 }
697
698 /* Resolve initializers for global variables in the linked shader.
699 */
700
701 return linked;
702 }
703
704
705 struct uniform_node {
706 exec_node link;
707 struct gl_uniform *u;
708 unsigned slots;
709 };
710
711 void
712 assign_uniform_locations(struct gl_shader_program *prog)
713 {
714 /* Merge uniforms from all of the linked shaders by name and assign locations. */
715 exec_list uniforms;
716 unsigned total_uniforms = 0;
717 hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
718 hash_table_string_compare);
719
720 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
721 unsigned next_position = 0;
722
723 foreach_list(node, prog->_LinkedShaders[i]->ir) {
724 ir_variable *const var = ((ir_instruction *) node)->as_variable();
725
726 if ((var == NULL) || (var->mode != ir_var_uniform))
727 continue;
728
729 const unsigned vec4_slots = (var->component_slots() + 3) / 4;
730 assert(vec4_slots != 0);
731
732 uniform_node *n = (uniform_node *) hash_table_find(ht, var->name);
733 if (n == NULL) {
734 n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
735 n->u = (gl_uniform *) calloc(vec4_slots, sizeof(struct gl_uniform));
736 n->slots = vec4_slots;
737
738 n->u[0].Name = strdup(var->name);
739 for (unsigned j = 1; j < vec4_slots; j++)
740 n->u[j].Name = n->u[0].Name;
741
742 hash_table_insert(ht, n, n->u[0].Name);
743 uniforms.push_tail(& n->link);
744 total_uniforms += vec4_slots;
745 }
746
747 if (var->constant_value != NULL)
748 for (unsigned j = 0; j < vec4_slots; j++)
749 n->u[j].Initialized = true;
750
751 var->location = next_position;
752
753 for (unsigned j = 0; j < vec4_slots; j++) {
754 switch (prog->_LinkedShaders[i]->Type) {
755 case GL_VERTEX_SHADER:
756 n->u[j].VertPos = next_position;
757 break;
758 case GL_FRAGMENT_SHADER:
759 n->u[j].FragPos = next_position;
760 break;
761 case GL_GEOMETRY_SHADER:
762 /* FINISHME: Support geometry shaders. */
763 assert(prog->_LinkedShaders[i]->Type != GL_GEOMETRY_SHADER);
764 break;
765 }
766
767 next_position++;
768 }
769 }
770 }
771
772 gl_uniform_list *ul = (gl_uniform_list *)
773 calloc(1, sizeof(gl_uniform_list));
774
775 ul->Size = total_uniforms;
776 ul->NumUniforms = total_uniforms;
777 ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));
778
779 unsigned idx = 0;
780 uniform_node *next;
781 for (uniform_node *node = (uniform_node *) uniforms.head
782 ; node->link.next != NULL
783 ; node = next) {
784 next = (uniform_node *) node->link.next;
785
786 node->link.remove();
787 memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform) * node->slots);
788 idx += node->slots;
789
790 free(node->u);
791 free(node);
792 }
793
794 hash_table_dtor(ht);
795
796 prog->Uniforms = ul;
797 }
798
799
800 /**
801 * Find a contiguous set of available bits in a bitmask
802 *
803 * \param used_mask Bits representing used (1) and unused (0) locations
804 * \param needed_count Number of contiguous bits needed.
805 *
806 * \return
807 * Base location of the available bits on success or -1 on failure.
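*
* For example, with \c used_mask 0x0b (locations 0, 1, and 3 in use) and
* \c needed_count 2, the lowest pair of free contiguous locations is 4 and 5,
* so 4 is returned.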
808 */
809 int
810 find_available_slots(unsigned used_mask, unsigned needed_count)
811 {
812 unsigned needed_mask = (1 << needed_count) - 1;
813 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
814
815 /* The comparison to 32 is redundant, but without it GCC emits "warning:
816 * cannot optimize possibly infinite loops" for the loop below.
817 */
818 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
819 return -1;
820
821 for (int i = 0; i <= max_bit_to_test; i++) {
822 if ((needed_mask & ~used_mask) == needed_mask)
823 return i;
824
825 needed_mask <<= 1;
826 }
827
828 return -1;
829 }
830
831
832 bool
833 assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
834 {
835 /* Mark invalid attribute locations as being used.
836 */
837 unsigned used_locations = (max_attribute_index >= 32)
838 ? ~0 : ~((1 << max_attribute_index) - 1);
839
840 gl_shader *const sh = prog->_LinkedShaders[0];
841 assert(sh->Type == GL_VERTEX_SHADER);
842
843 /* Operate in a total of four passes.
844 *
845 * 1. Invalidate the location assignments for all vertex shader inputs.
846 *
847 * 2. Assign locations for inputs that have user-defined (via
848 * glBindAttribLocation) locations.
849 *
850 * 3. Sort the attributes without assigned locations by number of slots
851 * required in decreasing order. Fragmentation caused by attribute
852 * locations assigned by the application may prevent large attributes
853 * from having enough contiguous space.
854 *
855 * 4. Assign locations to any inputs without assigned locations.
856 */
857
858 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
859
860 if (prog->Attributes != NULL) {
861 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
862 ir_variable *const var =
863 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
864
865 /* Note: attributes that occupy multiple slots, such as arrays or
866 * matrices, may appear in the attrib array multiple times.
867 */
868 if ((var == NULL) || (var->location != -1))
869 continue;
870
871 /* From page 61 of the OpenGL 4.0 spec:
872 *
873 * "LinkProgram will fail if the attribute bindings assigned by
874 * BindAttribLocation do not leave enough space to assign a
875 * location for an active matrix attribute or an active attribute
876 * array, both of which require multiple contiguous generic
877 * attributes."
878 *
879 * Previous versions of the spec contain similar language but omit the
880 * bit about attribute arrays.
881 *
882 * Page 61 of the OpenGL 4.0 spec also says:
883 *
884 * "It is possible for an application to bind more than one
885 * attribute name to the same location. This is referred to as
886 * aliasing. This will only work if only one of the aliased
887 * attributes is active in the executable program, or if no path
888 * through the shader consumes more than one attribute of a set
889 * of attributes aliased to the same location. A link error can
890 * occur if the linker determines that every path through the
891 * shader consumes multiple aliased attributes, but
892 * implementations are not required to generate an error in this
893 * case."
894 *
895 * These two paragraphs are either somewhat contradictory, or I don't
896 * fully understand one or both of them.
897 */
898 /* FINISHME: The code as currently written does not support attribute
899 * FINISHME: location aliasing (see comment above).
900 */
901 const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
902 const unsigned slots = count_attribute_slots(var->type);
903
904 /* Mask representing the contiguous slots that will be used by this
905 * attribute.
906 */
907 const unsigned use_mask = (1 << slots) - 1;
908
909 /* Generate a link error if the set of bits requested for this
910 * attribute overlaps any previously allocated bits.
911 */
912 if ((~(use_mask << attr) & used_locations) != used_locations) {
913 linker_error_printf(prog,
914 "insufficient contiguous attribute locations "
915 "available for vertex shader input `%s'",
916 var->name);
917 return false;
918 }
919
920 var->location = VERT_ATTRIB_GENERIC0 + attr;
921 used_locations |= (use_mask << attr);
922 }
923 }
924
925 /* Temporary storage for the set of attributes that need locations assigned.
926 */
927 struct temp_attr {
928 unsigned slots;
929 ir_variable *var;
930
931 /* Used below in the call to qsort. */
932 static int compare(const void *a, const void *b)
933 {
934 const temp_attr *const l = (const temp_attr *) a;
935 const temp_attr *const r = (const temp_attr *) b;
936
937 /* Reversed because we want a descending order sort below. */
938 return r->slots - l->slots;
939 }
940 } to_assign[16];
941
942 unsigned num_attr = 0;
943
944 foreach_list(node, sh->ir) {
945 ir_variable *const var = ((ir_instruction *) node)->as_variable();
946
947 if ((var == NULL) || (var->mode != ir_var_in))
948 continue;
949
950 /* The location was explicitly assigned, nothing to do here.
951 */
952 if (var->location != -1)
953 continue;
954
955 to_assign[num_attr].slots = count_attribute_slots(var->type);
956 to_assign[num_attr].var = var;
957 num_attr++;
958 }
959
960 /* If all of the attributes were assigned locations by the application (or
961 * are built-in attributes with fixed locations), return early. This should
962 * be the common case.
963 */
964 if (num_attr == 0)
965 return true;
966
967 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
968
969 /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can only
970 * be explicitly assigned via glBindAttribLocation. Mark it as reserved
971 * to prevent it from being automatically allocated below.
972 */
973 used_locations |= (1 << 0);
974
975 for (unsigned i = 0; i < num_attr; i++) {
976 /* Mask representing the contiguous slots that will be used by this
977 * attribute.
978 */
979 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
980
981 int location = find_available_slots(used_locations, to_assign[i].slots);
982
983 if (location < 0) {
984 linker_error_printf(prog,
985 "insufficient contiguous attribute locations "
986 "available for vertex shader input `%s'",
987 to_assign[i].var->name);
988 return false;
989 }
990
991 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
992 used_locations |= (use_mask << location);
993 }
994
995 return true;
996 }
997
998
999 void
1000 assign_varying_locations(gl_shader *producer, gl_shader *consumer)
1001 {
1002 /* FINISHME: Set dynamically when geometry shader support is added. */
1003 unsigned output_index = VERT_RESULT_VAR0;
1004 unsigned input_index = FRAG_ATTRIB_VAR0;
1005
1006 /* Operate in a total of three passes.
1007 *
1008 * 1. Assign locations for any matching inputs and outputs.
1009 *
1010 * 2. Mark output variables in the producer that do not have locations as
1011 * not being outputs. This lets the optimizer eliminate them.
1012 *
1013 * 3. Mark input variables in the consumer that do not have locations as
1014 * not being inputs. This lets the optimizer eliminate them.
1015 */
1016
1017 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
1018 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
1019
1020 foreach_list(node, producer->ir) {
1021 ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
1022
1023 if ((output_var == NULL) || (output_var->mode != ir_var_out)
1024 || (output_var->location != -1))
1025 continue;
1026
1027 ir_variable *const input_var =
1028 consumer->symbols->get_variable(output_var->name);
1029
1030 if ((input_var == NULL) || (input_var->mode != ir_var_in))
1031 continue;
1032
1033 assert(input_var->location == -1);
1034
1035 /* FINISHME: Location assignment will need some changes when arrays,
1036 * FINISHME: matrices, and structures are allowed as shader inputs /
1037 * FINISHME: outputs.
1038 */
1039 output_var->location = output_index;
1040 input_var->location = input_index;
1041
1042 output_index++;
1043 input_index++;
1044 }
1045
1046 foreach_list(node, producer->ir) {
1047 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1048
1049 if ((var == NULL) || (var->mode != ir_var_out))
1050 continue;
1051
1052 /* An 'out' variable is only really a shader output if its value is read
1053 * by the following stage.
1054 */
1055 if (var->location == -1) {
1056 var->shader_out = false;
1057 var->mode = ir_var_auto;
1058 }
1059 }
1060
1061 foreach_list(node, consumer->ir) {
1062 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1063
1064 if ((var == NULL) || (var->mode != ir_var_in))
1065 continue;
1066
1067 /* An 'in' variable is only really a shader input if its value is written
1068 * by the previous stage.
1069 */
1070 var->shader_in = (var->location != -1);
1071 }
1072 }
1073
1074
1075 void
1076 link_shaders(struct gl_shader_program *prog)
1077 {
1078 prog->LinkStatus = false;
1079 prog->Validated = false;
1080 prog->_Used = false;
1081
1082 if (prog->InfoLog != NULL)
1083 talloc_free(prog->InfoLog);
1084
1085 prog->InfoLog = talloc_strdup(NULL, "");
1086
1087 /* Separate the shaders into groups based on their type.
1088 */
1089 struct gl_shader **vert_shader_list;
1090 unsigned num_vert_shaders = 0;
1091 struct gl_shader **frag_shader_list;
1092 unsigned num_frag_shaders = 0;
1093
1094 vert_shader_list = (struct gl_shader **)
1095 calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
1096 frag_shader_list = &vert_shader_list[prog->NumShaders];
1097
1098 for (unsigned i = 0; i < prog->NumShaders; i++) {
1099 switch (prog->Shaders[i]->Type) {
1100 case GL_VERTEX_SHADER:
1101 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
1102 num_vert_shaders++;
1103 break;
1104 case GL_FRAGMENT_SHADER:
1105 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
1106 num_frag_shaders++;
1107 break;
1108 case GL_GEOMETRY_SHADER:
1109 /* FINISHME: Support geometry shaders. */
1110 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
1111 break;
1112 }
1113 }
1114
1115 /* FINISHME: Implement intra-stage linking. */
1116 prog->_NumLinkedShaders = 0;
1117 if (num_vert_shaders > 0) {
1118 gl_shader *const sh =
1119 link_intrastage_shaders(prog, vert_shader_list, num_vert_shaders);
1120
1121 if (sh == NULL)
1122 goto done;
1123
1124 if (!validate_vertex_shader_executable(prog, sh))
1125 goto done;
1126
1127 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1128 prog->_NumLinkedShaders++;
1129 }
1130
1131 if (num_frag_shaders > 0) {
1132 gl_shader *const sh =
1133 link_intrastage_shaders(prog, frag_shader_list, num_frag_shaders);
1134
1135 if (sh == NULL)
1136 goto done;
1137
1138 if (!validate_fragment_shader_executable(prog, sh))
1139 goto done;
1140
1141 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1142 prog->_NumLinkedShaders++;
1143 }
1144
1145 /* Here begins the inter-stage linking phase. Some initial validation is
1146 * performed, then locations are assigned for uniforms, attributes, and
1147 * varyings.
1148 */
1149 if (cross_validate_uniforms(prog)) {
1150 * Validate the inputs of each stage with the output of the preceding
1151 * stage.
1152 */
1153 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) {
1154 if (!cross_validate_outputs_to_inputs(prog,
1155 prog->_LinkedShaders[i - 1],
1156 prog->_LinkedShaders[i]))
1157 goto done;
1158 }
1159
1160 prog->LinkStatus = true;
1161 }
1162
1163 /* FINISHME: Perform whole-program optimization here. */
1164
1165 assign_uniform_locations(prog);
1166
1167 if (prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER)
1168 /* FINISHME: The value of the max_attribute_index parameter is
1169 * FINISHME: implementation dependent based on the value of
1170 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
1171 * FINISHME: at least 16, so hardcode 16 for now.
1172 */
1173 if (!assign_attribute_locations(prog, 16))
1174 goto done;
1175
1176 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++)
1177 assign_varying_locations(prog->_LinkedShaders[i - 1],
1178 prog->_LinkedShaders[i]);
1179
1180 /* FINISHME: Assign fragment shader output locations. */
1181
1182 done:
1183 free(vert_shader_list);
1184 }