glsl2: Add and use new variable mode ir_var_temporary
[mesa.git] / src / glsl / linker.cpp
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file linker.cpp
26 * GLSL linker implementation
27 *
28 * Given a set of shaders that are to be linked to generate a final program,
29 * there are three distinct stages.
30 *
31 * In the first stage shaders are partitioned into groups based on the shader
32 * type. All shaders of a particular type (e.g., vertex shaders) are linked
33 * together.
34 *
35 * - Undefined references in each shader are resolved to definitions in
36 * another shader.
37 * - Types and qualifiers of uniforms, outputs, and global variables defined
38 * in multiple shaders with the same name are verified to be the same.
39 * - Initializers for uniforms and global variables defined
40 * in multiple shaders with the same name are verified to be the same.
41 *
42 * The result, in the terminology of the GLSL spec, is a set of shader
43 * executables for each processing unit.
44 *
45 * After the first stage is complete, a series of semantic checks are performed
46 * on each of the shader executables.
47 *
48 * - Each shader executable must define a \c main function.
49 * - Each vertex shader executable must write to \c gl_Position.
50 * - Each fragment shader executable must write to either \c gl_FragData or
51 * \c gl_FragColor.
52 *
53 * In the final stage individual shader executables are linked to create a
54 * complete executable.
55 *
56 * - Types of uniforms defined in multiple shader stages with the same name
57 * are verified to be the same.
58 * - Initializers for uniforms defined in multiple shader stages with the
59 * same name are verified to be the same.
60 * - Types and qualifiers of outputs defined in one stage are verified to
61 * be the same as the types and qualifiers of inputs defined with the same
62 * name in a later stage.
63 *
64 * \author Ian Romanick <ian.d.romanick@intel.com>
65 */
66 #include <cstdlib>
67 #include <cstdio>
68 #include <cstdarg>
69 #include <climits>
70
71 extern "C" {
72 #include <talloc.h>
73 }
74
75 #include "main/mtypes.h"
76 #include "main/macros.h"
77 #include "glsl_symbol_table.h"
78 #include "ir.h"
79 #include "program.h"
80 #include "hash_table.h"
81 #include "shader_api.h"
82 #include "linker.h"
83
84 /**
85 * Visitor that determines whether or not a variable is ever written.
86 */
87 class find_assignment_visitor : public ir_hierarchical_visitor {
88 public:
89 find_assignment_visitor(const char *name)
90 : name(name), found(false)
91 {
92 /* empty */
93 }
94
95 virtual ir_visitor_status visit_enter(ir_assignment *ir)
96 {
97 ir_variable *const var = ir->lhs->variable_referenced();
98
99 if (strcmp(name, var->name) == 0) {
100 found = true;
101 return visit_stop;
102 }
103
104 return visit_continue_with_parent;
105 }
106
107 bool variable_found()
108 {
109 return found;
110 }
111
112 private:
113 const char *name; /**< Find writes to a variable with this name. */
114 bool found; /**< Was a write to the variable found? */
115 };
116
117
118 void
119 linker_error_printf(gl_shader_program *prog, const char *fmt, ...)
120 {
121 va_list ap;
122
123 prog->InfoLog = talloc_strdup_append(prog->InfoLog, "error: ");
124 va_start(ap, fmt);
125 prog->InfoLog = talloc_vasprintf_append(prog->InfoLog, fmt, ap);
126 va_end(ap);
127 }
128
129
130 void
131 invalidate_variable_locations(gl_shader *sh, enum ir_variable_mode mode,
132 int generic_base)
133 {
134 foreach_list(node, sh->ir) {
135 ir_variable *const var = ((ir_instruction *) node)->as_variable();
136
137 if ((var == NULL) || (var->mode != (unsigned) mode))
138 continue;
139
140 /* Only assign locations for generic attributes / varyings / etc.
141 */
142 if (var->location >= generic_base)
143 var->location = -1;
144 }
145 }
146
147
148 /**
149 * Determine the number of attribute slots required for a particular type
150 *
151 * This code is here because it implements the language rules of a specific
152 * GLSL version. Since it's a property of the language and not a property of
153 * types in general, it doesn't really belong in glsl_type.
154 */
155 unsigned
156 count_attribute_slots(const glsl_type *t)
157 {
158 /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
159 *
160 * "A scalar input counts the same amount against this limit as a vec4,
161 * so applications may want to consider packing groups of four
162 * unrelated float inputs together into a vector to better utilize the
163 * capabilities of the underlying hardware. A matrix input will use up
164 * multiple locations. The number of locations used will equal the
165 * number of columns in the matrix."
166 *
167 * The spec does not explicitly say how arrays are counted. However, it
168 * should be safe to assume the total number of slots consumed by an array
169 * is the number of entries in the array multiplied by the number of slots
170 * consumed by a single element of the array.
171 */
172
173 if (t->is_array())
174 return t->array_size() * count_attribute_slots(t->element_type());
175
176 if (t->is_matrix())
177 return t->matrix_columns;
178
179 return 1;
180 }
181
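/* To make the counting rule above concrete, a few illustrative values it
 * produces for common GLSL types (derived only from the code above, not from
 * additional spec language):
 *
 *    float, vec4      -> 1 slot  (a scalar counts the same as a vec4)
 *    mat4             -> 4 slots (one per column)
 *    vec2[3]          -> 3 slots (array size * slots per element)
 *    mat3[2]          -> 6 slots (2 elements * 3 columns each)
 */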
182
183 /**
184 * Verify that a vertex shader executable meets all semantic requirements
185 *
186 * \param shader Vertex shader executable to be verified
187 */
188 bool
189 validate_vertex_shader_executable(struct gl_shader_program *prog,
190 struct gl_shader *shader)
191 {
192 if (shader == NULL)
193 return true;
194
195 find_assignment_visitor find("gl_Position");
196 find.run(shader->ir);
197 if (!find.variable_found()) {
198 linker_error_printf(prog,
199 "vertex shader does not write to `gl_Position'\n");
200 return false;
201 }
202
203 return true;
204 }
205
206
207 /**
208 * Verify that a fragment shader executable meets all semantic requirements
209 *
210 * \param shader Fragment shader executable to be verified
211 */
212 bool
213 validate_fragment_shader_executable(struct gl_shader_program *prog,
214 struct gl_shader *shader)
215 {
216 if (shader == NULL)
217 return true;
218
219 find_assignment_visitor frag_color("gl_FragColor");
220 find_assignment_visitor frag_data("gl_FragData");
221
222 frag_color.run(shader->ir);
223 frag_data.run(shader->ir);
224
225 if (frag_color.variable_found() && frag_data.variable_found()) {
226 linker_error_printf(prog, "fragment shader writes to both "
227 "`gl_FragColor' and `gl_FragData'\n");
228 return false;
229 }
230
231 return true;
232 }
233
234
235 /**
236 * Generate a string describing the mode of a variable
237 */
238 static const char *
239 mode_string(const ir_variable *var)
240 {
241 switch (var->mode) {
242 case ir_var_auto:
243 return (var->read_only) ? "global constant" : "global variable";
244
245 case ir_var_uniform: return "uniform";
246 case ir_var_in: return "shader input";
247 case ir_var_out: return "shader output";
248 case ir_var_inout: return "shader inout";
249
250 case ir_var_temporary:
251 default:
252 assert(!"Should not get here.");
253 return "invalid variable";
254 }
255 }
256
257
258 /**
259 * Perform validation of global variables used across multiple shaders
260 */
261 bool
262 cross_validate_globals(struct gl_shader_program *prog,
263 struct gl_shader **shader_list,
264 unsigned num_shaders,
265 bool uniforms_only)
266 {
267    /* Examine all of the global variables in all of the shaders and cross
268     * validate them.
269 */
270 glsl_symbol_table variables;
271 for (unsigned i = 0; i < num_shaders; i++) {
272 foreach_list(node, shader_list[i]->ir) {
273 ir_variable *const var = ((ir_instruction *) node)->as_variable();
274
275 if (var == NULL)
276 continue;
277
278 if (uniforms_only && (var->mode != ir_var_uniform))
279 continue;
280
281 /* Don't cross validate temporaries that are at global scope. These
282          * will eventually get pulled into the shader's 'main'.
283 */
284 if (var->mode == ir_var_temporary)
285 continue;
286
287 /* If a global with this name has already been seen, verify that the
288 * new instance has the same type. In addition, if the globals have
289 * initializers, the values of the initializers must be the same.
290 */
291 ir_variable *const existing = variables.get_variable(var->name);
292 if (existing != NULL) {
293 if (var->type != existing->type) {
294 linker_error_printf(prog, "%s `%s' declared as type "
295 "`%s' and type `%s'\n",
296 mode_string(var),
297 var->name, var->type->name,
298 existing->type->name);
299 return false;
300 }
301
302 /* FINISHME: Handle non-constant initializers.
303 */
304 if (var->constant_value != NULL) {
305 if (existing->constant_value != NULL) {
306 if (!var->constant_value->has_value(existing->constant_value)) {
307 linker_error_printf(prog, "initializers for %s "
308 "`%s' have differing values\n",
309 mode_string(var), var->name);
310 return false;
311 }
312 } else
313 /* If the first-seen instance of a particular uniform did not
314 * have an initializer but a later instance does, copy the
315 * initializer to the version stored in the symbol table.
316 */
317 /* FINISHME: This is wrong. The constant_value field should
318 * FINISHME: not be modified! Imagine a case where a shader
319 * FINISHME: without an initializer is linked in two different
320 * FINISHME: programs with shaders that have differing
321 * FINISHME: initializers. Linking with the first will
322 * FINISHME: modify the shader, and linking with the second
323 * FINISHME: will fail.
324 */
325 existing->constant_value = var->constant_value->clone(NULL);
326 }
327 } else
328 variables.add_variable(var->name, var);
329 }
330 }
331
332 return true;
333 }
334
335
336 /**
337 * Perform validation of uniforms used across multiple shader stages
338 */
339 bool
340 cross_validate_uniforms(struct gl_shader_program *prog)
341 {
342 return cross_validate_globals(prog, prog->_LinkedShaders,
343 prog->_NumLinkedShaders, true);
344 }
345
346
347 /**
348 * Validate that outputs from one stage match inputs of another
349 */
350 bool
351 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
352 gl_shader *producer, gl_shader *consumer)
353 {
354 glsl_symbol_table parameters;
355 /* FINISHME: Figure these out dynamically. */
356 const char *const producer_stage = "vertex";
357 const char *const consumer_stage = "fragment";
358
359 /* Find all shader outputs in the "producer" stage.
360 */
361 foreach_list(node, producer->ir) {
362 ir_variable *const var = ((ir_instruction *) node)->as_variable();
363
364 /* FINISHME: For geometry shaders, this should also look for inout
365 * FINISHME: variables.
366 */
367 if ((var == NULL) || (var->mode != ir_var_out))
368 continue;
369
370 parameters.add_variable(var->name, var);
371 }
372
373
374 /* Find all shader inputs in the "consumer" stage. Any variables that have
375 * matching outputs already in the symbol table must have the same type and
376 * qualifiers.
377 */
378 foreach_list(node, consumer->ir) {
379 ir_variable *const input = ((ir_instruction *) node)->as_variable();
380
381 /* FINISHME: For geometry shaders, this should also look for inout
382 * FINISHME: variables.
383 */
384 if ((input == NULL) || (input->mode != ir_var_in))
385 continue;
386
387 ir_variable *const output = parameters.get_variable(input->name);
388 if (output != NULL) {
389 /* Check that the types match between stages.
390 */
391 if (input->type != output->type) {
392 linker_error_printf(prog,
393                                "%s shader output `%s' declared as "
394 "type `%s', but %s shader input declared "
395 "as type `%s'\n",
396 producer_stage, output->name,
397 output->type->name,
398 consumer_stage, input->type->name);
399 return false;
400 }
401
402 /* Check that all of the qualifiers match between stages.
403 */
404 if (input->centroid != output->centroid) {
405 linker_error_printf(prog,
406 "%s shader output `%s' %s centroid qualifier, "
407 "but %s shader input %s centroid qualifier\n",
408 producer_stage,
409 output->name,
410 (output->centroid) ? "has" : "lacks",
411 consumer_stage,
412 (input->centroid) ? "has" : "lacks");
413 return false;
414 }
415
416 if (input->invariant != output->invariant) {
417 linker_error_printf(prog,
418 "%s shader output `%s' %s invariant qualifier, "
419 "but %s shader input %s invariant qualifier\n",
420 producer_stage,
421 output->name,
422 (output->invariant) ? "has" : "lacks",
423 consumer_stage,
424 (input->invariant) ? "has" : "lacks");
425 return false;
426 }
427
428 if (input->interpolation != output->interpolation) {
429 linker_error_printf(prog,
430 "%s shader output `%s' specifies %s "
431 "interpolation qualifier, "
432 "but %s shader input specifies %s "
433 "interpolation qualifier\n",
434 producer_stage,
435 output->name,
436 output->interpolation_string(),
437 consumer_stage,
438 input->interpolation_string());
439 return false;
440 }
441 }
442 }
443
444 return true;
445 }
446
447
448 /**
449 * Populates a shader's symbol table with all global declarations
450 */
451 static void
452 populate_symbol_table(gl_shader *sh)
453 {
454 sh->symbols = new(sh) glsl_symbol_table;
455
456 foreach_list(node, sh->ir) {
457 ir_instruction *const inst = (ir_instruction *) node;
458 ir_variable *var;
459 ir_function *func;
460
461 if ((func = inst->as_function()) != NULL) {
462 sh->symbols->add_function(func->name, func);
463 } else if ((var = inst->as_variable()) != NULL) {
464 sh->symbols->add_variable(var->name, var);
465 }
466 }
467 }
468
469
470 /**
471 * Remap variables referenced in an instruction tree
472 *
473 * This is used when instruction trees are cloned from one shader and placed in
474 * another. These trees will contain references to \c ir_variable nodes that
475 * do not exist in the target shader. This function finds these \c ir_variable
476 * references and replaces the references with matching variables in the target
477 * shader.
478 *
479 * If there is no matching variable in the target shader, a clone of the
480 * \c ir_variable is made and added to the target shader. The new variable is
481 * added to \b both the instruction stream and the symbol table.
482 *
483 * \param inst IR tree that is to be processed.
484 * \param symbols Symbol table containing global scope symbols in the
485 * linked shader.
486 * \param instructions Instruction stream where new variable declarations
487 * should be added.
488 */
489 void
490 remap_variables(ir_instruction *inst, glsl_symbol_table *symbols,
491 exec_list *instructions, hash_table *temps)
492 {
493 class remap_visitor : public ir_hierarchical_visitor {
494 public:
495 remap_visitor(glsl_symbol_table *symbols, exec_list *instructions,
496 hash_table *temps)
497 {
498 this->symbols = symbols;
499 this->instructions = instructions;
500 this->temps = temps;
501 }
502
503 virtual ir_visitor_status visit(ir_dereference_variable *ir)
504 {
505 if (ir->var->mode == ir_var_temporary) {
506 ir_variable *var = (ir_variable *) hash_table_find(temps, ir->var);
507
508 assert(var != NULL);
509 ir->var = var;
510 return visit_continue;
511 }
512
513 ir_variable *const existing =
514 this->symbols->get_variable(ir->var->name);
515 if (existing != NULL)
516 ir->var = existing;
517 else {
518 ir_variable *copy = ir->var->clone(NULL);
519
520 this->symbols->add_variable(copy->name, copy);
521 this->instructions->push_head(copy);
522 ir->var = copy;
523 }
524
525 return visit_continue;
526 }
527
528 private:
529 glsl_symbol_table *symbols;
530 exec_list *instructions;
531 hash_table *temps;
532 };
533
534 remap_visitor v(symbols, instructions, temps);
535
536 inst->accept(&v);
537 }
538
539
540 /**
541 * Move non-declarations from one instruction stream to another
542 *
543 * The intended usage pattern of this function is to pass the pointer to the
544 * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
545 * pointer) for \c last and \c false for \c make_copies on the first
546 * call. Successive calls pass the return value of the previous call for
547 * \c last and \c true for \c make_copies.
548 *
549 * \param instructions Source instruction stream
550 * \param last Instruction after which new instructions should be
551 * inserted in the target instruction stream
552 * \param make_copies Flag selecting whether instructions in \c instructions
553 * should be copied (via \c ir_instruction::clone) into the
554 * target list or moved.
555 *
556 * \return
557 * The new "last" instruction in the target instruction stream. This pointer
558 * is suitable for use as the \c last parameter of a later call to this
559 * function.
560 */
561 exec_node *
562 move_non_declarations(exec_list *instructions, exec_node *last,
563 bool make_copies, gl_shader *target)
564 {
565 hash_table *temps = NULL;
566
567 if (make_copies)
568 temps = hash_table_ctor(0, hash_table_pointer_hash,
569 hash_table_pointer_compare);
570
571 foreach_list_safe(node, instructions) {
572 ir_instruction *inst = (ir_instruction *) node;
573
574 if (inst->as_function())
575 continue;
576
577 ir_variable *var = inst->as_variable();
578 if ((var != NULL) && (var->mode != ir_var_temporary))
579 continue;
580
581 assert(inst->as_assignment()
582 || ((var != NULL) && (var->mode == ir_var_temporary)));
583
584 if (make_copies) {
585 inst = inst->clone(NULL);
586
587 if (var != NULL)
588 hash_table_insert(temps, inst, var);
589 else
590 remap_variables(inst, target->symbols, target->ir, temps);
591 } else {
592 inst->remove();
593 }
594
595 last->insert_after(inst);
596 last = inst;
597 }
598
599 if (make_copies)
600 hash_table_dtor(temps);
601
602 return last;
603 }
604
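/* A minimal sketch of the calling convention described in the comment above,
 * mirroring how link_intrastage_shaders() below drives this function; the
 * names `main_sig', `linked', `shader_list', and `main_source' are
 * illustrative only:
 *
 *    exec_node *last = (exec_node *) &main_sig->body;
 *
 *    // First call: move (do not copy) the linked shader's own global-scope
 *    // instructions into the body of main.
 *    last = move_non_declarations(linked->ir, last, false, linked);
 *
 *    // Successive calls: copy from the other shaders of this stage,
 *    // chaining the returned insertion point each time.
 *    for (unsigned i = 0; i < num_shaders; i++) {
 *       if (shader_list[i] == main_source)
 *          continue;
 *       last = move_non_declarations(shader_list[i]->ir, last, true, linked);
 *    }
 */
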
605 /**
606 * Get the function signature for main from a shader
607 */
608 static ir_function_signature *
609 get_main_function_signature(gl_shader *sh)
610 {
611 ir_function *const f = sh->symbols->get_function("main");
612 if (f != NULL) {
613 exec_list void_parameters;
614
615 /* Look for the 'void main()' signature and ensure that it's defined.
616       * This keeps the linker from accidentally picking a shader that just
617 * contains a prototype for main.
618 *
619 * We don't have to check for multiple definitions of main (in multiple
620 * shaders) because that would have already been caught above.
621 */
622 ir_function_signature *sig = f->matching_signature(&void_parameters);
623 if ((sig != NULL) && sig->is_defined) {
624 return sig;
625 }
626 }
627
628 return NULL;
629 }
630
631
632 /**
633 * Combine a group of shaders for a single stage to generate a linked shader
634 *
635 * \note
636 * If this function is supplied a single shader, it is cloned, and the new
637 * shader is returned.
638 */
639 static struct gl_shader *
640 link_intrastage_shaders(struct gl_shader_program *prog,
641 struct gl_shader **shader_list,
642 unsigned num_shaders)
643 {
644 /* Check that global variables defined in multiple shaders are consistent.
645 */
646 if (!cross_validate_globals(prog, shader_list, num_shaders, false))
647 return NULL;
648
649 /* Check that there is only a single definition of each function signature
650 * across all shaders.
651 */
652 for (unsigned i = 0; i < (num_shaders - 1); i++) {
653 foreach_list(node, shader_list[i]->ir) {
654 ir_function *const f = ((ir_instruction *) node)->as_function();
655
656 if (f == NULL)
657 continue;
658
659 for (unsigned j = i + 1; j < num_shaders; j++) {
660 ir_function *const other =
661 shader_list[j]->symbols->get_function(f->name);
662
663 /* If the other shader has no function (and therefore no function
664 * signatures) with the same name, skip to the next shader.
665 */
666 if (other == NULL)
667 continue;
668
669 foreach_iter (exec_list_iterator, iter, *f) {
670 ir_function_signature *sig =
671 (ir_function_signature *) iter.get();
672
673 if (!sig->is_defined || sig->is_built_in)
674 continue;
675
676 ir_function_signature *other_sig =
677 other->exact_matching_signature(& sig->parameters);
678
679 if ((other_sig != NULL) && other_sig->is_defined
680 && !other_sig->is_built_in) {
681 linker_error_printf(prog,
682 "function `%s' is multiply defined",
683 f->name);
684 return NULL;
685 }
686 }
687 }
688 }
689 }
690
691 /* Find the shader that defines main, and make a clone of it.
692 *
693 * Starting with the clone, search for undefined references. If one is
694 * found, find the shader that defines it. Clone the reference and add
695 * it to the shader. Repeat until there are no undefined references or
696 * until a reference cannot be resolved.
697 */
698 gl_shader *main = NULL;
699 for (unsigned i = 0; i < num_shaders; i++) {
700 if (get_main_function_signature(shader_list[i]) != NULL) {
701 main = shader_list[i];
702 break;
703 }
704 }
705
706 if (main == NULL) {
707 linker_error_printf(prog, "%s shader lacks `main'\n",
708 (shader_list[0]->Type == GL_VERTEX_SHADER)
709 ? "vertex" : "fragment");
710 return NULL;
711 }
712
713 gl_shader *const linked = _mesa_new_shader(NULL, 0, main->Type);
714 linked->ir = new(linked) exec_list;
715 clone_ir_list(linked->ir, main->ir);
716
717 populate_symbol_table(linked);
718
719    /* Get a pointer to the main function in the final linked shader (i.e., the
720 * copy of the original shader that contained the main function).
721 */
722 ir_function_signature *const main_sig = get_main_function_signature(linked);
723
724 /* Move any instructions other than variable declarations or function
725 * declarations into main.
726 */
727 exec_node *insertion_point =
728 move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
729 linked);
730
731 for (unsigned i = 0; i < num_shaders; i++) {
732 if (shader_list[i] == main)
733 continue;
734
735 insertion_point = move_non_declarations(shader_list[i]->ir,
736 insertion_point, true, linked);
737 }
738
739    /* Resolve calls in the linked shader to functions defined in other shaders.
740     */
741 link_function_calls(prog, linked, shader_list, num_shaders);
742
743 return linked;
744 }
745
746
747 struct uniform_node {
748 exec_node link;
749 struct gl_uniform *u;
750 unsigned slots;
751 };
752
753 void
754 assign_uniform_locations(struct gl_shader_program *prog)
755 {
756 /* */
757 exec_list uniforms;
758 unsigned total_uniforms = 0;
759 hash_table *ht = hash_table_ctor(32, hash_table_string_hash,
760 hash_table_string_compare);
761
762 for (unsigned i = 0; i < prog->_NumLinkedShaders; i++) {
763 unsigned next_position = 0;
764
765 foreach_list(node, prog->_LinkedShaders[i]->ir) {
766 ir_variable *const var = ((ir_instruction *) node)->as_variable();
767
768 if ((var == NULL) || (var->mode != ir_var_uniform))
769 continue;
770
771 const unsigned vec4_slots = (var->component_slots() + 3) / 4;
772 assert(vec4_slots != 0);
773
774 uniform_node *n = (uniform_node *) hash_table_find(ht, var->name);
775 if (n == NULL) {
776 n = (uniform_node *) calloc(1, sizeof(struct uniform_node));
777 n->u = (gl_uniform *) calloc(vec4_slots, sizeof(struct gl_uniform));
778 n->slots = vec4_slots;
779
780 n->u[0].Name = strdup(var->name);
781 for (unsigned j = 1; j < vec4_slots; j++)
782 n->u[j].Name = n->u[0].Name;
783
784 hash_table_insert(ht, n, n->u[0].Name);
785 uniforms.push_tail(& n->link);
786 total_uniforms += vec4_slots;
787 }
788
789 if (var->constant_value != NULL)
790 for (unsigned j = 0; j < vec4_slots; j++)
791 n->u[j].Initialized = true;
792
793 var->location = next_position;
794
795 for (unsigned j = 0; j < vec4_slots; j++) {
796 switch (prog->_LinkedShaders[i]->Type) {
797 case GL_VERTEX_SHADER:
798 n->u[j].VertPos = next_position;
799 break;
800 case GL_FRAGMENT_SHADER:
801 n->u[j].FragPos = next_position;
802 break;
803 case GL_GEOMETRY_SHADER:
804 /* FINISHME: Support geometry shaders. */
805 assert(prog->_LinkedShaders[i]->Type != GL_GEOMETRY_SHADER);
806 break;
807 }
808
809 next_position++;
810 }
811 }
812 }
813
814 gl_uniform_list *ul = (gl_uniform_list *)
815 calloc(1, sizeof(gl_uniform_list));
816
817 ul->Size = total_uniforms;
818 ul->NumUniforms = total_uniforms;
819 ul->Uniforms = (gl_uniform *) calloc(total_uniforms, sizeof(gl_uniform));
820
821 unsigned idx = 0;
822 uniform_node *next;
823 for (uniform_node *node = (uniform_node *) uniforms.head
824 ; node->link.next != NULL
825 ; node = next) {
826 next = (uniform_node *) node->link.next;
827
828 node->link.remove();
829 memcpy(&ul->Uniforms[idx], node->u, sizeof(gl_uniform) * node->slots);
830 idx += node->slots;
831
832 free(node->u);
833 free(node);
834 }
835
836 hash_table_dtor(ht);
837
838 prog->Uniforms = ul;
839 }
840
841
842 /**
843 * Find a contiguous set of available bits in a bitmask
844 *
845 * \param used_mask Bits representing used (1) and unused (0) locations
846 * \param needed_count Number of contiguous bits needed.
847 *
848 * \return
849 * Base location of the available bits on success or -1 on failure.
850 */
851 int
852 find_available_slots(unsigned used_mask, unsigned needed_count)
853 {
854 unsigned needed_mask = (1 << needed_count) - 1;
855 const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
856
857 /* The comparison to 32 is redundant, but without it GCC emits "warning:
858 * cannot optimize possibly infinite loops" for the loop below.
859 */
860 if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
861 return -1;
862
863 for (int i = 0; i <= max_bit_to_test; i++) {
864 if ((needed_mask & ~used_mask) == needed_mask)
865 return i;
866
867 needed_mask <<= 1;
868 }
869
870 return -1;
871 }
872
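/* A worked example of the scan implemented above, purely for illustration:
 *
 *    used_mask    = 0x0b   (binary 1011: slots 0, 1, and 3 already in use)
 *    needed_count = 2      (needed_mask starts as binary 11)
 *
 *    i = 0..3: the two-bit window always overlaps an occupied slot, so
 *              needed_mask keeps shifting left.
 *    i = 4:    bits 4 and 5 are both free, so the function returns 4.
 */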
873
874 bool
875 assign_attribute_locations(gl_shader_program *prog, unsigned max_attribute_index)
876 {
877 /* Mark invalid attribute locations as being used.
878 */
879 unsigned used_locations = (max_attribute_index >= 32)
880 ? ~0 : ~((1 << max_attribute_index) - 1);
881
882 gl_shader *const sh = prog->_LinkedShaders[0];
883 assert(sh->Type == GL_VERTEX_SHADER);
884
885 /* Operate in a total of four passes.
886 *
887 * 1. Invalidate the location assignments for all vertex shader inputs.
888 *
889 * 2. Assign locations for inputs that have user-defined (via
890     *     glBindAttribLocation) locations.
891 *
892 * 3. Sort the attributes without assigned locations by number of slots
893 * required in decreasing order. Fragmentation caused by attribute
894 * locations assigned by the application may prevent large attributes
895 * from having enough contiguous space.
896 *
897 * 4. Assign locations to any inputs without assigned locations.
898 */
899
900 invalidate_variable_locations(sh, ir_var_in, VERT_ATTRIB_GENERIC0);
901
902 if (prog->Attributes != NULL) {
903 for (unsigned i = 0; i < prog->Attributes->NumParameters; i++) {
904 ir_variable *const var =
905 sh->symbols->get_variable(prog->Attributes->Parameters[i].Name);
906
907 /* Note: attributes that occupy multiple slots, such as arrays or
908 * matrices, may appear in the attrib array multiple times.
909 */
910 if ((var == NULL) || (var->location != -1))
911 continue;
912
913 /* From page 61 of the OpenGL 4.0 spec:
914 *
915 * "LinkProgram will fail if the attribute bindings assigned by
916           *     BindAttribLocation do not leave enough space to assign a
917 * location for an active matrix attribute or an active attribute
918 * array, both of which require multiple contiguous generic
919 * attributes."
920 *
921 * Previous versions of the spec contain similar language but omit the
922 * bit about attribute arrays.
923 *
924 * Page 61 of the OpenGL 4.0 spec also says:
925 *
926 * "It is possible for an application to bind more than one
927 * attribute name to the same location. This is referred to as
928 * aliasing. This will only work if only one of the aliased
929 * attributes is active in the executable program, or if no path
930 * through the shader consumes more than one attribute of a set
931 * of attributes aliased to the same location. A link error can
932 * occur if the linker determines that every path through the
933 * shader consumes multiple aliased attributes, but
934 * implementations are not required to generate an error in this
935 * case."
936 *
937 * These two paragraphs are either somewhat contradictory, or I don't
938 * fully understand one or both of them.
939 */
940 /* FINISHME: The code as currently written does not support attribute
941 * FINISHME: location aliasing (see comment above).
942 */
943 const int attr = prog->Attributes->Parameters[i].StateIndexes[0];
944 const unsigned slots = count_attribute_slots(var->type);
945
946 /* Mask representing the contiguous slots that will be used by this
947 * attribute.
948 */
949 const unsigned use_mask = (1 << slots) - 1;
950
951 /* Generate a link error if the set of bits requested for this
952 * attribute overlaps any previously allocated bits.
953 */
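         /* For example (numbers illustrative only): a mat2 bound at attr = 2
          * needs two slots, so use_mask = 0x3 and (use_mask << attr) = 0xc.
          * If used_locations already has bit 3 set, masking bits 2 and 3 out
          * of used_locations changes its value, and the test below reports
          * the collision.
          */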
954 if ((~(use_mask << attr) & used_locations) != used_locations) {
955 linker_error_printf(prog,
956 "insufficient contiguous attribute locations "
957 "available for vertex shader input `%s'",
958 var->name);
959 return false;
960 }
961
962 var->location = VERT_ATTRIB_GENERIC0 + attr;
963 used_locations |= (use_mask << attr);
964 }
965 }
966
967 /* Temporary storage for the set of attributes that need locations assigned.
968 */
969 struct temp_attr {
970 unsigned slots;
971 ir_variable *var;
972
973 /* Used below in the call to qsort. */
974 static int compare(const void *a, const void *b)
975 {
976 const temp_attr *const l = (const temp_attr *) a;
977 const temp_attr *const r = (const temp_attr *) b;
978
979 /* Reversed because we want a descending order sort below. */
980 return r->slots - l->slots;
981 }
982 } to_assign[16];
983
984 unsigned num_attr = 0;
985
986 foreach_list(node, sh->ir) {
987 ir_variable *const var = ((ir_instruction *) node)->as_variable();
988
989 if ((var == NULL) || (var->mode != ir_var_in))
990 continue;
991
992 /* The location was explicitly assigned, nothing to do here.
993 */
994 if (var->location != -1)
995 continue;
996
997 to_assign[num_attr].slots = count_attribute_slots(var->type);
998 to_assign[num_attr].var = var;
999 num_attr++;
1000 }
1001
1002 /* If all of the attributes were assigned locations by the application (or
1003 * are built-in attributes with fixed locations), return early. This should
1004 * be the common case.
1005 */
1006 if (num_attr == 0)
1007 return true;
1008
1009 qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
1010
1011    /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS.  It can only
1012     * be explicitly assigned via glBindAttribLocation.  Mark it as reserved
1013 * to prevent it from being automatically allocated below.
1014 */
1015 used_locations |= (1 << 0);
1016
1017 for (unsigned i = 0; i < num_attr; i++) {
1018 /* Mask representing the contiguous slots that will be used by this
1019 * attribute.
1020 */
1021 const unsigned use_mask = (1 << to_assign[i].slots) - 1;
1022
1023 int location = find_available_slots(used_locations, to_assign[i].slots);
1024
1025 if (location < 0) {
1026 linker_error_printf(prog,
1027 "insufficient contiguous attribute locations "
1028 "available for vertex shader input `%s'",
1029 to_assign[i].var->name);
1030 return false;
1031 }
1032
1033 to_assign[i].var->location = VERT_ATTRIB_GENERIC0 + location;
1034 used_locations |= (use_mask << location);
1035 }
1036
1037 return true;
1038 }
1039
1040
1041 void
1042 assign_varying_locations(gl_shader *producer, gl_shader *consumer)
1043 {
1044 /* FINISHME: Set dynamically when geometry shader support is added. */
1045 unsigned output_index = VERT_RESULT_VAR0;
1046 unsigned input_index = FRAG_ATTRIB_VAR0;
1047
1048 /* Operate in a total of three passes.
1049 *
1050 * 1. Assign locations for any matching inputs and outputs.
1051 *
1052 * 2. Mark output variables in the producer that do not have locations as
1053 * not being outputs. This lets the optimizer eliminate them.
1054 *
1055 * 3. Mark input variables in the consumer that do not have locations as
1056 * not being inputs. This lets the optimizer eliminate them.
1057 */
1058
1059 invalidate_variable_locations(producer, ir_var_out, VERT_RESULT_VAR0);
1060 invalidate_variable_locations(consumer, ir_var_in, FRAG_ATTRIB_VAR0);
1061
1062 foreach_list(node, producer->ir) {
1063 ir_variable *const output_var = ((ir_instruction *) node)->as_variable();
1064
1065 if ((output_var == NULL) || (output_var->mode != ir_var_out)
1066 || (output_var->location != -1))
1067 continue;
1068
1069 ir_variable *const input_var =
1070 consumer->symbols->get_variable(output_var->name);
1071
1072 if ((input_var == NULL) || (input_var->mode != ir_var_in))
1073 continue;
1074
1075 assert(input_var->location == -1);
1076
1077 /* FINISHME: Location assignment will need some changes when arrays,
1078 * FINISHME: matrices, and structures are allowed as shader inputs /
1079 * FINISHME: outputs.
1080 */
1081 output_var->location = output_index;
1082 input_var->location = input_index;
1083
1084 output_index++;
1085 input_index++;
1086 }
1087
1088 foreach_list(node, producer->ir) {
1089 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1090
1091 if ((var == NULL) || (var->mode != ir_var_out))
1092 continue;
1093
1094 /* An 'out' variable is only really a shader output if its value is read
1095 * by the following stage.
1096 */
1097 if (var->location == -1) {
1098 var->shader_out = false;
1099 var->mode = ir_var_auto;
1100 }
1101 }
1102
1103 foreach_list(node, consumer->ir) {
1104 ir_variable *const var = ((ir_instruction *) node)->as_variable();
1105
1106 if ((var == NULL) || (var->mode != ir_var_in))
1107 continue;
1108
1109 /* An 'in' variable is only really a shader input if its value is written
1110 * by the previous stage.
1111 */
1112 var->shader_in = (var->location != -1);
1113 }
1114 }
1115
1116
1117 void
1118 link_shaders(struct gl_shader_program *prog)
1119 {
1120 prog->LinkStatus = false;
1121 prog->Validated = false;
1122 prog->_Used = false;
1123
1124 if (prog->InfoLog != NULL)
1125 talloc_free(prog->InfoLog);
1126
1127 prog->InfoLog = talloc_strdup(NULL, "");
1128
1129 /* Separate the shaders into groups based on their type.
1130 */
1131 struct gl_shader **vert_shader_list;
1132 unsigned num_vert_shaders = 0;
1133 struct gl_shader **frag_shader_list;
1134 unsigned num_frag_shaders = 0;
1135
1136 vert_shader_list = (struct gl_shader **)
1137 calloc(2 * prog->NumShaders, sizeof(struct gl_shader *));
1138 frag_shader_list = &vert_shader_list[prog->NumShaders];
1139
1140 unsigned min_version = UINT_MAX;
1141 unsigned max_version = 0;
1142 for (unsigned i = 0; i < prog->NumShaders; i++) {
1143 min_version = MIN2(min_version, prog->Shaders[i]->Version);
1144 max_version = MAX2(max_version, prog->Shaders[i]->Version);
1145
1146 switch (prog->Shaders[i]->Type) {
1147 case GL_VERTEX_SHADER:
1148 vert_shader_list[num_vert_shaders] = prog->Shaders[i];
1149 num_vert_shaders++;
1150 break;
1151 case GL_FRAGMENT_SHADER:
1152 frag_shader_list[num_frag_shaders] = prog->Shaders[i];
1153 num_frag_shaders++;
1154 break;
1155 case GL_GEOMETRY_SHADER:
1156 /* FINISHME: Support geometry shaders. */
1157 assert(prog->Shaders[i]->Type != GL_GEOMETRY_SHADER);
1158 break;
1159 }
1160 }
1161
1162 /* Previous to GLSL version 1.30, different compilation units could mix and
1163 * match shading language versions. With GLSL 1.30 and later, the versions
1164 * of all shaders must match.
1165 */
1166 assert(min_version >= 110);
1167 assert(max_version <= 130);
1168 if ((max_version >= 130) && (min_version != max_version)) {
1169 linker_error_printf(prog, "all shaders must use same shading "
1170 "language version\n");
1171 goto done;
1172 }
1173
1174 prog->Version = max_version;
1175
1176 /* Link all shaders for a particular stage and validate the result.
1177 */
1178 prog->_NumLinkedShaders = 0;
1179 if (num_vert_shaders > 0) {
1180 gl_shader *const sh =
1181 link_intrastage_shaders(prog, vert_shader_list, num_vert_shaders);
1182
1183 if (sh == NULL)
1184 goto done;
1185
1186 if (!validate_vertex_shader_executable(prog, sh))
1187 goto done;
1188
1189 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1190 prog->_NumLinkedShaders++;
1191 }
1192
1193 if (num_frag_shaders > 0) {
1194 gl_shader *const sh =
1195 link_intrastage_shaders(prog, frag_shader_list, num_frag_shaders);
1196
1197 if (sh == NULL)
1198 goto done;
1199
1200 if (!validate_fragment_shader_executable(prog, sh))
1201 goto done;
1202
1203 prog->_LinkedShaders[prog->_NumLinkedShaders] = sh;
1204 prog->_NumLinkedShaders++;
1205 }
1206
1207 /* Here begins the inter-stage linking phase. Some initial validation is
1208 * performed, then locations are assigned for uniforms, attributes, and
1209 * varyings.
1210 */
1211 if (cross_validate_uniforms(prog)) {
1212       /* Validate the inputs of each stage against the outputs of the preceding
1213 * stage.
1214 */
1215 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++) {
1216 if (!cross_validate_outputs_to_inputs(prog,
1217 prog->_LinkedShaders[i - 1],
1218 prog->_LinkedShaders[i]))
1219 goto done;
1220 }
1221
1222 prog->LinkStatus = true;
1223 }
1224
1225 /* FINISHME: Perform whole-program optimization here. */
1226
1227 assign_uniform_locations(prog);
1228
1229 if (prog->_LinkedShaders[0]->Type == GL_VERTEX_SHADER)
1230 /* FINISHME: The value of the max_attribute_index parameter is
1231 * FINISHME: implementation dependent based on the value of
1232 * FINISHME: GL_MAX_VERTEX_ATTRIBS. GL_MAX_VERTEX_ATTRIBS must be
1233 * FINISHME: at least 16, so hardcode 16 for now.
1234 */
1235 if (!assign_attribute_locations(prog, 16))
1236 goto done;
1237
1238 for (unsigned i = 1; i < prog->_NumLinkedShaders; i++)
1239 assign_varying_locations(prog->_LinkedShaders[i - 1],
1240 prog->_LinkedShaders[i]);
1241
1242 /* FINISHME: Assign fragment shader output locations. */
1243
1244 done:
1245 free(vert_shader_list);
1246 }