glsl/linker: refactor link-time validation of output locations
[mesa.git] / src/compiler/glsl/link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/mtypes.h"
33 #include "glsl_symbol_table.h"
34 #include "glsl_parser_extras.h"
35 #include "ir_optimization.h"
36 #include "linker.h"
37 #include "link_varyings.h"
38 #include "main/macros.h"
39 #include "util/hash_table.h"
40 #include "program.h"
41
42
43 /**
44 * Get the varying type stripped of the outermost array if we're processing
45 * a stage whose varyings are arrays indexed by a vertex number (such as
46 * geometry shader inputs).
47 */
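/* For illustration, consider a hypothetical geometry shader input declared
 * in GLSL as:
 *
 *    in vec4 color[3];   // one vec4 per vertex of the input primitive
 *
 * The per-vertex varying type the linker cares about here is vec4, so
 * get_varying_type() strips the outermost (vertex-indexed) array level.
 * Tessellation control outputs and tessellation control/evaluation inputs
 * are handled the same way, except for per-patch variables.
 */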
48 static const glsl_type *
49 get_varying_type(const ir_variable *var, gl_shader_stage stage)
50 {
51 const glsl_type *type = var->type;
52
53 if (!var->data.patch &&
54 ((var->data.mode == ir_var_shader_out &&
55 stage == MESA_SHADER_TESS_CTRL) ||
56 (var->data.mode == ir_var_shader_in &&
57 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
58 stage == MESA_SHADER_GEOMETRY)))) {
59 assert(type->is_array());
60 type = type->fields.array;
61 }
62
63 return type;
64 }
65
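/* create_xfb_varying_names() expands an aggregate output into the flat list
 * of names that transform feedback can capture. As a hypothetical example,
 * a producer output declared as:
 *
 *    struct S { vec4 a; float b; };
 *    out S s[2];
 *
 * yields the names "s[0].a", "s[0].b", "s[1].a" and "s[1].b", while a member
 * of a named interface block is prefixed with the block name (e.g.
 * "Block.member") rather than the instance name.
 */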
66 static void
67 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
68 size_t name_length, unsigned *count,
69 const char *ifc_member_name,
70 const glsl_type *ifc_member_t, char ***varying_names)
71 {
72 if (t->is_interface()) {
73 size_t new_length = name_length;
74
75 assert(ifc_member_name && ifc_member_t);
76 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
77
78 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
79 NULL, NULL, varying_names);
80 } else if (t->is_record()) {
81 for (unsigned i = 0; i < t->length; i++) {
82 const char *field = t->fields.structure[i].name;
83 size_t new_length = name_length;
84
85 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
86
87 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
88 new_length, count, NULL, NULL,
89 varying_names);
90 }
91 } else if (t->without_array()->is_record() ||
92 t->without_array()->is_interface() ||
93 (t->is_array() && t->fields.array->is_array())) {
94 for (unsigned i = 0; i < t->length; i++) {
95 size_t new_length = name_length;
96
97 /* Append the subscript to the current variable name */
98 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
99
100 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
101 count, ifc_member_name, ifc_member_t,
102 varying_names);
103 }
104 } else {
105 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
106 }
107 }
108
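/* Scans the producer's outputs for ARB_enhanced_layouts transform feedback
 * layout qualifiers. As a hypothetical example, a shader containing:
 *
 *    layout(xfb_buffer = 0, xfb_stride = 32) out;
 *    layout(xfb_offset = 0)  out vec4 pos;
 *    layout(xfb_offset = 16) out vec4 color;
 *
 * puts the program in transform feedback capturing mode and contributes two
 * varying names ("pos" and "color") to the list built below, even if the
 * application never calls glTransformFeedbackVaryings().
 */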
109 static bool
110 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
111 struct gl_shader_program *prog,
112 unsigned *num_tfeedback_decls,
113 char ***varying_names)
114 {
115 bool has_xfb_qualifiers = false;
116
117 /* We still need to enable transform feedback mode even if xfb_stride is
118 * only applied to a global out. Also, we don't bother to propagate
119 * xfb_stride to interface block members, so this will catch that case too.
120 */
121 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
122 if (prog->TransformFeedback.BufferStride[j]) {
123 has_xfb_qualifiers = true;
124 break;
125 }
126 }
127
128 foreach_in_list(ir_instruction, node, sh->ir) {
129 ir_variable *var = node->as_variable();
130 if (!var || var->data.mode != ir_var_shader_out)
131 continue;
132
133 /* From the ARB_enhanced_layouts spec:
134 *
135 * "Any shader making any static use (after preprocessing) of any of
136 * these *xfb_* qualifiers will cause the shader to be in a
137 * transform feedback capturing mode and hence responsible for
138 * describing the transform feedback setup. This mode will capture
139 * any output selected by *xfb_offset*, directly or indirectly, to
140 * a transform feedback buffer."
141 */
142 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
143 has_xfb_qualifiers = true;
144 }
145
146 if (var->data.explicit_xfb_offset) {
147 *num_tfeedback_decls += var->type->varying_count();
148 has_xfb_qualifiers = true;
149 }
150 }
151
152 if (*num_tfeedback_decls == 0)
153 return has_xfb_qualifiers;
154
155 unsigned i = 0;
156 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
157 foreach_in_list(ir_instruction, node, sh->ir) {
158 ir_variable *var = node->as_variable();
159 if (!var || var->data.mode != ir_var_shader_out)
160 continue;
161
162 if (var->data.explicit_xfb_offset) {
163 char *name;
164 const glsl_type *type, *member_type;
165
166 if (var->data.from_named_ifc_block) {
167 type = var->get_interface_type();
168 /* Find the member type before it was altered by lowering */
169 member_type =
170 type->fields.structure[type->field_index(var->name)].type;
171 name = ralloc_strdup(NULL, type->without_array()->name);
172 } else {
173 type = var->type;
174 member_type = NULL;
175 name = ralloc_strdup(NULL, var->name);
176 }
177 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
178 var->name, member_type, varying_names);
179 ralloc_free(name);
180 }
181 }
182
183 assert(i == *num_tfeedback_decls);
184 return has_xfb_qualifiers;
185 }
186
187 /**
188 * Validate the types and qualifiers of an output from one stage against the
189 * matching input to another stage.
190 */
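/* As a hypothetical example, the following pair fails to link with a type
 * mismatch error reported by this function:
 *
 *    vertex shader:    out vec3 normal;
 *    fragment shader:  in  vec4 normal;
 */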
191 static void
192 cross_validate_types_and_qualifiers(struct gl_shader_program *prog,
193 const ir_variable *input,
194 const ir_variable *output,
195 gl_shader_stage consumer_stage,
196 gl_shader_stage producer_stage)
197 {
198 /* Check that the types match between stages.
199 */
200 const glsl_type *type_to_match = input->type;
201
202 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
203 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
204 consumer_stage != MESA_SHADER_FRAGMENT) ||
205 consumer_stage == MESA_SHADER_GEOMETRY;
206 if (extra_array_level) {
207 assert(type_to_match->is_array());
208 type_to_match = type_to_match->fields.array;
209 }
210
211 if (type_to_match != output->type) {
212 /* There is a bit of a special case for gl_TexCoord. This
213 * built-in is unsized by default. Applications that variably
214 * access it must redeclare it with a size. There is some
215 * language in the GLSL spec that implies the fragment shader
216 * and vertex shader do not have to agree on this size. Other
217 * drivers behave this way, and one or two applications seem to
218 * rely on it.
219 *
220 * Neither declaration needs to be modified here because the array
221 * sizes are fixed later when update_array_sizes is called.
222 *
223 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
224 *
225 * "Unlike user-defined varying variables, the built-in
226 * varying variables don't have a strict one-to-one
227 * correspondence between the vertex language and the
228 * fragment language."
229 */
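/* For example, a hypothetical legacy application might declare:
 *
 *    vertex shader:    varying vec4 gl_TexCoord[2];
 *    fragment shader:  varying vec4 gl_TexCoord[4];
 *
 * and still expect the program to link.
 */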
230 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
231 linker_error(prog,
232 "%s shader output `%s' declared as type `%s', "
233 "but %s shader input declared as type `%s'\n",
234 _mesa_shader_stage_to_string(producer_stage),
235 output->name,
236 output->type->name,
237 _mesa_shader_stage_to_string(consumer_stage),
238 input->type->name);
239 return;
240 }
241 }
242
243 /* Check that all of the qualifiers match between stages.
244 */
245
246 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
247 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
248 * conformance test suite does not verify that the qualifiers must match.
249 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
250 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
251 */
252 if (false /* always skip the centroid check */ &&
253 prog->data->Version < (prog->IsES ? 310 : 430) &&
254 input->data.centroid != output->data.centroid) {
255 linker_error(prog,
256 "%s shader output `%s' %s centroid qualifier, "
257 "but %s shader input %s centroid qualifier\n",
258 _mesa_shader_stage_to_string(producer_stage),
259 output->name,
260 (output->data.centroid) ? "has" : "lacks",
261 _mesa_shader_stage_to_string(consumer_stage),
262 (input->data.centroid) ? "has" : "lacks");
263 return;
264 }
265
266 if (input->data.sample != output->data.sample) {
267 linker_error(prog,
268 "%s shader output `%s' %s sample qualifier, "
269 "but %s shader input %s sample qualifier\n",
270 _mesa_shader_stage_to_string(producer_stage),
271 output->name,
272 (output->data.sample) ? "has" : "lacks",
273 _mesa_shader_stage_to_string(consumer_stage),
274 (input->data.sample) ? "has" : "lacks");
275 return;
276 }
277
278 if (input->data.patch != output->data.patch) {
279 linker_error(prog,
280 "%s shader output `%s' %s patch qualifier, "
281 "but %s shader input %s patch qualifier\n",
282 _mesa_shader_stage_to_string(producer_stage),
283 output->name,
284 (output->data.patch) ? "has" : "lacks",
285 _mesa_shader_stage_to_string(consumer_stage),
286 (input->data.patch) ? "has" : "lacks");
287 return;
288 }
289
290 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
291 *
292 * "As only outputs need be declared with invariant, an output from
293 * one shader stage will still match an input of a subsequent stage
294 * without the input being declared as invariant."
295 *
296 * while GLSL 4.20 says:
297 *
298 * "For variables leaving one shader and coming into another shader,
299 * the invariant keyword has to be used in both shaders, or a link
300 * error will result."
301 *
302 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
303 *
304 * "The invariance of varyings that are declared in both the vertex
305 * and fragment shaders must match."
306 */
307 if (input->data.invariant != output->data.invariant &&
308 prog->data->Version < (prog->IsES ? 300 : 430)) {
309 linker_error(prog,
310 "%s shader output `%s' %s invariant qualifier, "
311 "but %s shader input %s invariant qualifier\n",
312 _mesa_shader_stage_to_string(producer_stage),
313 output->name,
314 (output->data.invariant) ? "has" : "lacks",
315 _mesa_shader_stage_to_string(consumer_stage),
316 (input->data.invariant) ? "has" : "lacks");
317 return;
318 }
319
320 /* GLSL >= 4.40 removes the text requiring interpolation qualifiers
321 * to match across stages; they must only match within the same stage.
322 *
323 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
324 *
325 * "It is a link-time error if, within the same stage, the interpolation
326 * qualifiers of variables of the same name do not match."
327 *
328 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
329 *
330 * "When no interpolation qualifier is present, smooth interpolation
331 * is used."
332 *
333 * So we match variables where one is smooth and the other has no explicit
334 * qualifier.
335 */
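/* For example (GLSL ES 3.00), the following pair of declarations is treated
 * as matching because an absent qualifier defaults to smooth:
 *
 *    vertex shader:    smooth out mediump vec4 color;
 *    fragment shader:  in mediump vec4 color;
 */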
336 unsigned input_interpolation = input->data.interpolation;
337 unsigned output_interpolation = output->data.interpolation;
338 if (prog->IsES) {
339 if (input_interpolation == INTERP_MODE_NONE)
340 input_interpolation = INTERP_MODE_SMOOTH;
341 if (output_interpolation == INTERP_MODE_NONE)
342 output_interpolation = INTERP_MODE_SMOOTH;
343 }
344 if (input_interpolation != output_interpolation &&
345 prog->data->Version < 440) {
346 linker_error(prog,
347 "%s shader output `%s' specifies %s "
348 "interpolation qualifier, "
349 "but %s shader input specifies %s "
350 "interpolation qualifier\n",
351 _mesa_shader_stage_to_string(producer_stage),
352 output->name,
353 interpolation_string(output->data.interpolation),
354 _mesa_shader_stage_to_string(consumer_stage),
355 interpolation_string(input->data.interpolation));
356 return;
357 }
358 }
359
360 /**
361 * Validate front and back color outputs against single color input
362 */
363 static void
364 cross_validate_front_and_back_color(struct gl_shader_program *prog,
365 const ir_variable *input,
366 const ir_variable *front_color,
367 const ir_variable *back_color,
368 gl_shader_stage consumer_stage,
369 gl_shader_stage producer_stage)
370 {
371 if (front_color != NULL && front_color->data.assigned)
372 cross_validate_types_and_qualifiers(prog, input, front_color,
373 consumer_stage, producer_stage);
374
375 if (back_color != NULL && back_color->data.assigned)
376 cross_validate_types_and_qualifiers(prog, input, back_color,
377 consumer_stage, producer_stage);
378 }
379
380 static unsigned
381 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
382 {
383 unsigned location_start = VARYING_SLOT_VAR0;
384
385 switch (stage) {
386 case MESA_SHADER_VERTEX:
387 if (var->data.mode == ir_var_shader_in)
388 location_start = VERT_ATTRIB_GENERIC0;
389 break;
390 case MESA_SHADER_TESS_CTRL:
391 case MESA_SHADER_TESS_EVAL:
392 if (var->data.patch)
393 location_start = VARYING_SLOT_PATCH0;
394 break;
395 case MESA_SHADER_FRAGMENT:
396 if (var->data.mode == ir_var_shader_out)
397 location_start = FRAG_RESULT_DATA0;
398 break;
399 default:
400 break;
401 }
402
403 return var->data.location - location_start;
404 }
405
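/* check_location_aliasing() enforces the ARB_enhanced_layouts rules for
 * outputs that share a location. As a hypothetical example, these two
 * outputs may both use location 0 because they occupy disjoint components
 * and have the same base type:
 *
 *    layout(location = 0, component = 0) out vec2 a;
 *    layout(location = 0, component = 2) out vec2 b;
 *
 * whereas assigning both to component 0, or giving one of them an integer
 * base type, is a link error reported below.
 */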
406 static bool
407 check_location_aliasing(ir_variable *explicit_locations[][4],
408 ir_variable *var,
409 unsigned location,
410 unsigned component,
411 unsigned location_limit,
412 const glsl_type *type,
413 gl_shader_program *prog,
414 gl_shader_stage stage)
415 {
416 unsigned last_comp;
417 if (type->without_array()->is_record()) {
418 /* The component qualifier can't be used on structs so just treat
419 * all component slots as used.
420 */
421 last_comp = 4;
422 } else {
423 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
424 last_comp = component + type->without_array()->vector_elements * dmul;
425 }
426
427 while (location < location_limit) {
428 unsigned i = component;
429 while (i < last_comp) {
430 if (explicit_locations[location][i] != NULL) {
431 linker_error(prog,
432 "%s shader has multiple outputs explicitly "
433 "assigned to location %d and component %d\n",
434 _mesa_shader_stage_to_string(stage),
435 location, component);
436 return false;
437 }
438
439 /* Make sure all components at this location have the same type.
440 */
441 for (unsigned j = 0; j < 4; j++) {
442 if (explicit_locations[location][j] &&
443 (explicit_locations[location][j]->type->without_array()
444 ->base_type != type->without_array()->base_type)) {
445 linker_error(prog,
446 "Varyings sharing the same location must "
447 "have the same underlying numerical type. "
448 "Location %u component %u\n", location, component);
449 return false;
450 }
451 }
452
453 explicit_locations[location][i] = var;
454 i++;
455
456 /* We need to do some special handling for doubles as dvec3 and
457 * dvec4 consume two consecutive locations. We don't need to
458 * worry about components beginning at anything other than 0 as
459 * the spec does not allow this for dvec3 and dvec4.
460 */
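/* For example, a hypothetical output declared as
 *
 *    layout(location = 2) out dvec4 d;
 *
 * has last_comp == 8, so after filling the four components of location 2
 * the loop below continues and also marks the four components of
 * location 3.
 */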
461 if (i == 4 && last_comp > 4) {
462 last_comp = last_comp - 4;
463 /* Bump location index and reset the component index */
464 location++;
465 i = 0;
466 }
467 }
468
469 location++;
470 }
471
472 return true;
473 }
474
475 /**
476 * Validate that outputs from one stage match inputs of another
477 */
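/* Varyings are matched either by name or, when both sides use explicit
 * locations, purely by location. As a hypothetical example, the following
 * pair links even though the names differ:
 *
 *    vertex shader:    layout(location = 2) out vec4 vs_data;
 *    fragment shader:  layout(location = 2) in  vec4 fs_data;
 *
 * The explicit_locations table built below is used both to detect this kind
 * of match and to reject illegal location aliasing in the producer.
 */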
478 void
479 cross_validate_outputs_to_inputs(struct gl_context *ctx,
480 struct gl_shader_program *prog,
481 gl_linked_shader *producer,
482 gl_linked_shader *consumer)
483 {
484 glsl_symbol_table parameters;
485 ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
486 { {NULL, NULL} };
487
488 /* Find all shader outputs in the "producer" stage.
489 */
490 foreach_in_list(ir_instruction, node, producer->ir) {
491 ir_variable *const var = node->as_variable();
492
493 if (var == NULL || var->data.mode != ir_var_shader_out)
494 continue;
495
496 if (!var->data.explicit_location
497 || var->data.location < VARYING_SLOT_VAR0)
498 parameters.add_variable(var);
499 else {
500 /* User-defined varyings with explicit locations are handled
501 * differently because they do not need to have matching names.
502 */
503 const glsl_type *type = get_varying_type(var, producer->Stage);
504 unsigned num_elements = type->count_attribute_slots(false);
505 unsigned idx = compute_variable_location_slot(var, producer->Stage);
506 unsigned slot_limit = idx + num_elements;
507
508 unsigned slot_max =
509 ctx->Const.Program[producer->Stage].MaxOutputComponents / 4;
510 if (slot_limit > slot_max) {
511 linker_error(prog,
512 "Invalid location %u in %s shader\n",
513 idx, _mesa_shader_stage_to_string(producer->Stage));
514 return;
515 }
516
517 if (!check_location_aliasing(explicit_locations, var, idx,
518 var->data.location_frac, slot_limit,
519 type, prog, producer->Stage)) {
520 return;
521 }
522 }
523 }
524
525
526 /* Find all shader inputs in the "consumer" stage. Any variables that have
527 * matching outputs already in the symbol table must have the same type and
528 * qualifiers.
529 *
530 * Exception: if the consumer is the geometry shader, then the inputs
531 * should be arrays and the type of the array element should match the type
532 * of the corresponding producer output.
533 */
534 foreach_in_list(ir_instruction, node, consumer->ir) {
535 ir_variable *const input = node->as_variable();
536
537 if (input == NULL || input->data.mode != ir_var_shader_in)
538 continue;
539
540 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
541 const ir_variable *const front_color =
542 parameters.get_variable("gl_FrontColor");
543
544 const ir_variable *const back_color =
545 parameters.get_variable("gl_BackColor");
546
547 cross_validate_front_and_back_color(prog, input,
548 front_color, back_color,
549 consumer->Stage, producer->Stage);
550 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
551 const ir_variable *const front_color =
552 parameters.get_variable("gl_FrontSecondaryColor");
553
554 const ir_variable *const back_color =
555 parameters.get_variable("gl_BackSecondaryColor");
556
557 cross_validate_front_and_back_color(prog, input,
558 front_color, back_color,
559 consumer->Stage, producer->Stage);
560 } else {
561 /* The rules for connecting inputs and outputs change in the presence
562 * of explicit locations. In this case, we no longer care about the
563 * names of the variables. Instead, we care only about the
564 * explicitly assigned location.
565 */
566 ir_variable *output = NULL;
567 if (input->data.explicit_location
568 && input->data.location >= VARYING_SLOT_VAR0) {
569
570 const glsl_type *type = get_varying_type(input, consumer->Stage);
571 unsigned num_elements = type->count_attribute_slots(false);
572 unsigned idx =
573 compute_variable_location_slot(input, consumer->Stage);
574 unsigned slot_limit = idx + num_elements;
575
576 while (idx < slot_limit) {
577 if (idx >= MAX_VARYING) {
578 linker_error(prog,
579 "Invalid location %u in %s shader\n", idx,
580 _mesa_shader_stage_to_string(consumer->Stage));
581 return;
582 }
583
584 output = explicit_locations[idx][input->data.location_frac];
585
586 if (output == NULL ||
587 input->data.location != output->data.location) {
588 linker_error(prog,
589 "%s shader input `%s' with explicit location "
590 "has no matching output\n",
591 _mesa_shader_stage_to_string(consumer->Stage),
592 input->name);
593 break;
594 }
595 idx++;
596 }
597 } else {
598 output = parameters.get_variable(input->name);
599 }
600
601 if (output != NULL) {
602 /* Interface blocks have their own validation elsewhere so don't
603 * try validating them here.
604 */
605 if (!(input->get_interface_type() &&
606 output->get_interface_type()))
607 cross_validate_types_and_qualifiers(prog, input, output,
608 consumer->Stage,
609 producer->Stage);
610 } else {
611 /* Check for input vars with unmatched output vars in the previous
612 * stage, taking into account that interface blocks could have a
613 * matching output but with a different name, so we ignore them.
614 */
615 assert(!input->data.assigned);
616 if (input->data.used && !input->get_interface_type() &&
617 !input->data.explicit_location && !prog->SeparateShader)
618 linker_error(prog,
619 "%s shader input `%s' "
620 "has no matching output in the previous stage\n",
621 _mesa_shader_stage_to_string(consumer->Stage),
622 input->name);
623 }
624 }
625 }
626 }
627
628 /**
629 * Demote shader inputs and outputs that are not used in other stages, and
630 * remove them via dead code elimination.
631 */
632 static void
633 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
634 gl_linked_shader *sh,
635 enum ir_variable_mode mode)
636 {
637 if (is_separate_shader_object)
638 return;
639
640 foreach_in_list(ir_instruction, node, sh->ir) {
641 ir_variable *const var = node->as_variable();
642
643 if (var == NULL || var->data.mode != int(mode))
644 continue;
645
646 /* A shader 'in' or 'out' variable is only really an input or output if
647 * its value is used by other shader stages. This will cause the
648 * variable to have a location assigned.
649 */
650 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
651 assert(var->data.mode != ir_var_temporary);
652
653 /* Assign zeros to demoted inputs to allow more optimizations. */
654 if (var->data.mode == ir_var_shader_in && !var->constant_value)
655 var->constant_value = ir_constant::zero(var, var->type);
656
657 var->data.mode = ir_var_auto;
658 }
659 }
660
661 /* Eliminate code that is now dead due to unused inputs/outputs being
662 * demoted.
663 */
664 while (do_dead_code(sh->ir, false))
665 ;
666
667 }
668
669 /**
670 * Initialize this object based on a string that was passed to
671 * glTransformFeedbackVaryings.
672 *
673 * If the input is malformed, this call still succeeds, but it sets
674 * this->var_name to the malformed input, so tfeedback_decl::find_output_var()
675 * will fail to find any matching variable.
676 */
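/* The strings handled here are the ones an application passes to
 * glTransformFeedbackVaryings(). A hypothetical example:
 *
 *    const char *varyings[] = {
 *       "outPos",             // plain variable
 *       "outColor[1]",        // single array element
 *       "gl_NextBuffer",      // ARB_transform_feedback3 buffer separator
 *       "gl_SkipComponents2"  // ARB_transform_feedback3 padding
 *    };
 *    glTransformFeedbackVaryings(prog, 4, varyings, GL_INTERLEAVED_ATTRIBS);
 *
 * Each string becomes one tfeedback_decl; the subscript, separator and skip
 * cases are recognized by init() below.
 */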
677 void
678 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
679 const char *input)
680 {
681 /* We don't have to be pedantic about what is a valid GLSL variable name,
682 * because any variable with an invalid name can't exist in the IR anyway.
683 */
684
685 this->location = -1;
686 this->orig_name = input;
687 this->lowered_builtin_array_variable = none;
688 this->skip_components = 0;
689 this->next_buffer_separator = false;
690 this->matched_candidate = NULL;
691 this->stream_id = 0;
692 this->buffer = 0;
693 this->offset = 0;
694
695 if (ctx->Extensions.ARB_transform_feedback3) {
696 /* Parse gl_NextBuffer. */
697 if (strcmp(input, "gl_NextBuffer") == 0) {
698 this->next_buffer_separator = true;
699 return;
700 }
701
702 /* Parse gl_SkipComponents. */
703 if (strcmp(input, "gl_SkipComponents1") == 0)
704 this->skip_components = 1;
705 else if (strcmp(input, "gl_SkipComponents2") == 0)
706 this->skip_components = 2;
707 else if (strcmp(input, "gl_SkipComponents3") == 0)
708 this->skip_components = 3;
709 else if (strcmp(input, "gl_SkipComponents4") == 0)
710 this->skip_components = 4;
711
712 if (this->skip_components)
713 return;
714 }
715
716 /* Parse a declaration. */
717 const char *base_name_end;
718 long subscript = parse_program_resource_name(input, &base_name_end);
719 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
720 if (this->var_name == NULL) {
721 _mesa_error_no_memory(__func__);
722 return;
723 }
724
725 if (subscript >= 0) {
726 this->array_subscript = subscript;
727 this->is_subscripted = true;
728 } else {
729 this->is_subscripted = false;
730 }
731
732 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
733 * class must behave specially to account for the fact that gl_ClipDistance
734 * is converted from a float[8] to a vec4[2].
735 */
736 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
737 strcmp(this->var_name, "gl_ClipDistance") == 0) {
738 this->lowered_builtin_array_variable = clip_distance;
739 }
740 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
741 strcmp(this->var_name, "gl_CullDistance") == 0) {
742 this->lowered_builtin_array_variable = cull_distance;
743 }
744
745 if (ctx->Const.LowerTessLevel &&
746 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
747 this->lowered_builtin_array_variable = tess_level_outer;
748 if (ctx->Const.LowerTessLevel &&
749 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
750 this->lowered_builtin_array_variable = tess_level_inner;
751 }
752
753
754 /**
755 * Determine whether two tfeedback_decl objects refer to the same variable and
756 * array index (if applicable).
757 */
758 bool
759 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
760 {
761 assert(x.is_varying() && y.is_varying());
762
763 if (strcmp(x.var_name, y.var_name) != 0)
764 return false;
765 if (x.is_subscripted != y.is_subscripted)
766 return false;
767 if (x.is_subscripted && x.array_subscript != y.array_subscript)
768 return false;
769 return true;
770 }
771
772
773 /**
774 * Assign a location and stream ID for this tfeedback_decl object based on the
775 * transform feedback candidate found by find_candidate.
776 *
777 * If an error occurs, the error is reported through linker_error() and false
778 * is returned.
779 */
780 bool
781 tfeedback_decl::assign_location(struct gl_context *ctx,
782 struct gl_shader_program *prog)
783 {
784 assert(this->is_varying());
785
786 unsigned fine_location
787 = this->matched_candidate->toplevel_var->data.location * 4
788 + this->matched_candidate->toplevel_var->data.location_frac
789 + this->matched_candidate->offset;
790 const unsigned dmul =
791 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
792
793 if (this->matched_candidate->type->is_array()) {
794 /* Array variable */
795 const unsigned matrix_cols =
796 this->matched_candidate->type->fields.array->matrix_columns;
797 const unsigned vector_elements =
798 this->matched_candidate->type->fields.array->vector_elements;
799 unsigned actual_array_size;
800 switch (this->lowered_builtin_array_variable) {
801 case clip_distance:
802 actual_array_size = prog->last_vert_prog ?
803 prog->last_vert_prog->info.clip_distance_array_size : 0;
804 break;
805 case cull_distance:
806 actual_array_size = prog->last_vert_prog ?
807 prog->last_vert_prog->info.cull_distance_array_size : 0;
808 break;
809 case tess_level_outer:
810 actual_array_size = 4;
811 break;
812 case tess_level_inner:
813 actual_array_size = 2;
814 break;
815 case none:
816 default:
817 actual_array_size = this->matched_candidate->type->array_size();
818 break;
819 }
820
821 if (this->is_subscripted) {
822 /* Check array bounds. */
823 if (this->array_subscript >= actual_array_size) {
824 linker_error(prog, "Transform feedback varying %s has index "
825 "%i, but the array size is %u.",
826 this->orig_name, this->array_subscript,
827 actual_array_size);
828 return false;
829 }
830 unsigned array_elem_size = this->lowered_builtin_array_variable ?
831 1 : vector_elements * matrix_cols * dmul;
832 fine_location += array_elem_size * this->array_subscript;
833 this->size = 1;
834 } else {
835 this->size = actual_array_size;
836 }
837 this->vector_elements = vector_elements;
838 this->matrix_columns = matrix_cols;
839 if (this->lowered_builtin_array_variable)
840 this->type = GL_FLOAT;
841 else
842 this->type = this->matched_candidate->type->fields.array->gl_type;
843 } else {
844 /* Regular variable (scalar, vector, or matrix) */
845 if (this->is_subscripted) {
846 linker_error(prog, "Transform feedback varying %s requested, "
847 "but %s is not an array.",
848 this->orig_name, this->var_name);
849 return false;
850 }
851 this->size = 1;
852 this->vector_elements = this->matched_candidate->type->vector_elements;
853 this->matrix_columns = this->matched_candidate->type->matrix_columns;
854 this->type = this->matched_candidate->type->gl_type;
855 }
856 this->location = fine_location / 4;
857 this->location_frac = fine_location % 4;
858
859 /* From GL_EXT_transform_feedback:
860 * A program will fail to link if:
861 *
862 * * the total number of components to capture in any varying
863 * variable in <varyings> is greater than the constant
864 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
865 * buffer mode is SEPARATE_ATTRIBS_EXT;
866 */
867 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
868 this->num_components() >
869 ctx->Const.MaxTransformFeedbackSeparateComponents) {
870 linker_error(prog, "Transform feedback varying %s exceeds "
871 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
872 this->orig_name);
873 return false;
874 }
875
876 /* Only transform feedback varyings can be assigned to non-zero streams,
877 * so assign the stream id here.
878 */
879 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
880
881 unsigned array_offset = this->array_subscript * 4 * dmul;
882 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
883 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
884 this->offset = this->matched_candidate->toplevel_var->data.offset +
885 array_offset + struct_offset;
886
887 return true;
888 }
889
890
891 unsigned
892 tfeedback_decl::get_num_outputs() const
893 {
894 if (!this->is_varying()) {
895 return 0;
896 }
897 return (this->num_components() + this->location_frac + 3)/4;
898 }
899
900
901 /**
902 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
903 *
904 * If an error occurs, the error is reported through linker_error() and false
905 * is returned.
906 */
907 bool
908 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
909 struct gl_transform_feedback_info *info,
910 unsigned buffer, unsigned buffer_index,
911 const unsigned max_outputs, bool *explicit_stride,
912 bool has_xfb_qualifiers) const
913 {
914 unsigned xfb_offset = 0;
915 unsigned size = this->size;
916 /* Handle gl_SkipComponents. */
917 if (this->skip_components) {
918 info->Buffers[buffer].Stride += this->skip_components;
919 size = this->skip_components;
920 goto store_varying;
921 }
922
923 if (this->next_buffer_separator) {
924 size = 0;
925 goto store_varying;
926 }
927
928 if (has_xfb_qualifiers) {
929 xfb_offset = this->offset / 4;
930 } else {
931 xfb_offset = info->Buffers[buffer].Stride;
932 }
933 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
934
935 {
936 unsigned location = this->location;
937 unsigned location_frac = this->location_frac;
938 unsigned num_components = this->num_components();
939 while (num_components > 0) {
940 unsigned output_size = MIN2(num_components, 4 - location_frac);
941 assert((info->NumOutputs == 0 && max_outputs == 0) ||
942 info->NumOutputs < max_outputs);
943
944 /* From the ARB_enhanced_layouts spec:
945 *
946 * "If such a block member or variable is not written during a shader
947 * invocation, the buffer contents at the assigned offset will be
948 * undefined. Even if there are no static writes to a variable or
949 * member that is assigned a transform feedback offset, the space is
950 * still allocated in the buffer and still affects the stride."
951 */
952 if (this->is_varying_written()) {
953 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
954 info->Outputs[info->NumOutputs].OutputRegister = location;
955 info->Outputs[info->NumOutputs].NumComponents = output_size;
956 info->Outputs[info->NumOutputs].StreamId = stream_id;
957 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
958 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
959 ++info->NumOutputs;
960 }
961 info->Buffers[buffer].Stream = this->stream_id;
962 xfb_offset += output_size;
963
964 num_components -= output_size;
965 location++;
966 location_frac = 0;
967 }
968 }
969
970 if (explicit_stride && explicit_stride[buffer]) {
971 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
972 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
973 "multiple of 8 as its applied to a type that is or "
974 "contains a double.",
975 info->Buffers[buffer].Stride * 4);
976 return false;
977 }
978
979 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
980 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
981 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
982 "buffer (%d)", xfb_offset * 4,
983 info->Buffers[buffer].Stride * 4, buffer);
984 return false;
985 }
986 } else {
987 info->Buffers[buffer].Stride = xfb_offset;
988 }
989
990 /* From GL_EXT_transform_feedback:
991 * A program will fail to link if:
992 *
993 * * the total number of components to capture is greater than
994 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
995 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
996 *
997 * From GL_ARB_enhanced_layouts:
998 *
999 * "The resulting stride (implicit or explicit) must be less than or
1000 * equal to the implementation-dependent constant
1001 * gl_MaxTransformFeedbackInterleavedComponents."
1002 */
1003 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1004 has_xfb_qualifiers) &&
1005 info->Buffers[buffer].Stride >
1006 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1007 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1008 "limit has been exceeded.");
1009 return false;
1010 }
1011
1012 store_varying:
1013 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1014 this->orig_name);
1015 info->Varyings[info->NumVarying].Type = this->type;
1016 info->Varyings[info->NumVarying].Size = size;
1017 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1018 info->NumVarying++;
1019 info->Buffers[buffer].NumVaryings++;
1020
1021 return true;
1022 }
1023
1024
1025 const tfeedback_candidate *
1026 tfeedback_decl::find_candidate(gl_shader_program *prog,
1027 hash_table *tfeedback_candidates)
1028 {
1029 const char *name = this->var_name;
1030 switch (this->lowered_builtin_array_variable) {
1031 case none:
1032 name = this->var_name;
1033 break;
1034 case clip_distance:
1035 name = "gl_ClipDistanceMESA";
1036 break;
1037 case cull_distance:
1038 name = "gl_CullDistanceMESA";
1039 break;
1040 case tess_level_outer:
1041 name = "gl_TessLevelOuterMESA";
1042 break;
1043 case tess_level_inner:
1044 name = "gl_TessLevelInnerMESA";
1045 break;
1046 }
1047 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1048
1049 this->matched_candidate = entry ?
1050 (const tfeedback_candidate *) entry->data : NULL;
1051
1052 if (!this->matched_candidate) {
1053 /* From GL_EXT_transform_feedback:
1054 * A program will fail to link if:
1055 *
1056 * * any variable name specified in the <varyings> array is not
1057 * declared as an output in the geometry shader (if present) or
1058 * the vertex shader (if no geometry shader is present);
1059 */
1060 linker_error(prog, "Transform feedback varying %s undeclared.",
1061 this->orig_name);
1062 }
1063
1064 return this->matched_candidate;
1065 }
1066
1067
1068 /**
1069 * Parse all the transform feedback declarations that were passed to
1070 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1071 *
1072 * If an error occurs, the error is reported through linker_error() and false
1073 * is returned.
1074 */
1075 static bool
1076 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1077 const void *mem_ctx, unsigned num_names,
1078 char **varying_names, tfeedback_decl *decls)
1079 {
1080 for (unsigned i = 0; i < num_names; ++i) {
1081 decls[i].init(ctx, mem_ctx, varying_names[i]);
1082
1083 if (!decls[i].is_varying())
1084 continue;
1085
1086 /* From GL_EXT_transform_feedback:
1087 * A program will fail to link if:
1088 *
1089 * * any two entries in the <varyings> array specify the same varying
1090 * variable;
1091 *
1092 * We interpret this to mean "any two entries in the <varyings> array
1093 * specify the same varying variable and array index", since transform
1094 * feedback of arrays would be useless otherwise.
1095 */
1096 for (unsigned j = 0; j < i; ++j) {
1097 if (!decls[j].is_varying())
1098 continue;
1099
1100 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1101 linker_error(prog, "Transform feedback varying %s specified "
1102 "more than once.", varying_names[i]);
1103 return false;
1104 }
1105 }
1106 }
1107 return true;
1108 }
1109
1110
1111 static int
1112 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1113 {
1114 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1115 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1116
1117 if (x->get_buffer() != y->get_buffer())
1118 return x->get_buffer() - y->get_buffer();
1119 return x->get_offset() - y->get_offset();
1120 }
1121
1122 /**
1123 * Store transform feedback location assignments into
1124 * prog->sh.LinkedTransformFeedback based on the data stored in
1125 * tfeedback_decls.
1126 *
1127 * If an error occurs, the error is reported through linker_error() and false
1128 * is returned.
1129 */
1130 static bool
1131 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1132 unsigned num_tfeedback_decls,
1133 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1134 {
1135 if (!prog->last_vert_prog)
1136 return true;
1137
1138 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1139 * tracking the number of buffers doesn't overflow.
1140 */
1141 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1142
1143 bool separate_attribs_mode =
1144 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1145
1146 struct gl_program *xfb_prog = prog->last_vert_prog;
1147 xfb_prog->sh.LinkedTransformFeedback =
1148 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1149
1150 /* The xfb_offset qualifier does not have to be used in increasing order;
1151 * however, some drivers expect to receive the list of transform feedback
1152 * declarations in order, so sort it now for convenience.
1153 */
1154 if (has_xfb_qualifiers)
1155 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1156 cmp_xfb_offset);
1157
1158 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1159 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1160 num_tfeedback_decls);
1161
1162 unsigned num_outputs = 0;
1163 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1164 if (tfeedback_decls[i].is_varying_written())
1165 num_outputs += tfeedback_decls[i].get_num_outputs();
1166 }
1167
1168 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1169 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1170 num_outputs);
1171
1172 unsigned num_buffers = 0;
1173 unsigned buffers = 0;
1174
1175 if (!has_xfb_qualifiers && separate_attribs_mode) {
1176 /* GL_SEPARATE_ATTRIBS */
1177 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1178 if (!tfeedback_decls[i].store(ctx, prog,
1179 xfb_prog->sh.LinkedTransformFeedback,
1180 num_buffers, num_buffers, num_outputs,
1181 NULL, has_xfb_qualifiers))
1182 return false;
1183
1184 buffers |= 1 << num_buffers;
1185 num_buffers++;
1186 }
1187 }
1188 else {
1189 /* GL_INTERLEAVED_ATTRIBS */
1190 int buffer_stream_id = -1;
1191 unsigned buffer =
1192 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1193 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1194
1195 /* Apply any xfb_stride global qualifiers */
1196 if (has_xfb_qualifiers) {
1197 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1198 if (prog->TransformFeedback.BufferStride[j]) {
1199 buffers |= 1 << j;
1200 explicit_stride[j] = true;
1201 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1202 prog->TransformFeedback.BufferStride[j] / 4;
1203 }
1204 }
1205 }
1206
1207 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1208 if (has_xfb_qualifiers &&
1209 buffer != tfeedback_decls[i].get_buffer()) {
1210 /* we have moved to the next buffer so reset stream id */
1211 buffer_stream_id = -1;
1212 num_buffers++;
1213 }
1214
1215 if (tfeedback_decls[i].is_next_buffer_separator()) {
1216 if (!tfeedback_decls[i].store(ctx, prog,
1217 xfb_prog->sh.LinkedTransformFeedback,
1218 buffer, num_buffers, num_outputs,
1219 explicit_stride, has_xfb_qualifiers))
1220 return false;
1221 num_buffers++;
1222 buffer_stream_id = -1;
1223 continue;
1224 } else if (tfeedback_decls[i].is_varying()) {
1225 if (buffer_stream_id == -1) {
1226 /* First varying writing to this buffer: remember its stream */
1227 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1228 } else if (buffer_stream_id !=
1229 (int) tfeedback_decls[i].get_stream_id()) {
1230 /* Varying writes to the same buffer from a different stream */
1231 linker_error(prog,
1232 "Transform feedback can't capture varyings belonging "
1233 "to different vertex streams in a single buffer. "
1234 "Varying %s writes to buffer from stream %u, other "
1235 "varyings in the same buffer write from stream %u.",
1236 tfeedback_decls[i].name(),
1237 tfeedback_decls[i].get_stream_id(),
1238 buffer_stream_id);
1239 return false;
1240 }
1241 }
1242
1243 if (has_xfb_qualifiers) {
1244 buffer = tfeedback_decls[i].get_buffer();
1245 } else {
1246 buffer = num_buffers;
1247 }
1248 buffers |= 1 << buffer;
1249
1250 if (!tfeedback_decls[i].store(ctx, prog,
1251 xfb_prog->sh.LinkedTransformFeedback,
1252 buffer, num_buffers, num_outputs,
1253 explicit_stride, has_xfb_qualifiers))
1254 return false;
1255 }
1256 }
1257
1258 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1259
1260 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1261 return true;
1262 }
1263
1264 namespace {
1265
1266 /**
1267 * Data structure recording the relationship between outputs of one shader
1268 * stage (the "producer") and inputs of another (the "consumer").
1269 */
1270 class varying_matches
1271 {
1272 public:
1273 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1274 bool enhanced_layouts_enabled,
1275 gl_shader_stage producer_stage,
1276 gl_shader_stage consumer_stage);
1277 ~varying_matches();
1278 void record(ir_variable *producer_var, ir_variable *consumer_var);
1279 unsigned assign_locations(struct gl_shader_program *prog,
1280 uint8_t *components,
1281 uint64_t reserved_slots);
1282 void store_locations() const;
1283
1284 private:
1285 bool is_varying_packing_safe(const glsl_type *type,
1286 const ir_variable *var);
1287
1288 /**
1289 * If true, this driver disables varying packing, so all varyings need to
1290 * be aligned on slot boundaries, and take up a number of slots equal to
1291 * their number of matrix columns times their array size.
1292 *
1293 * Packing may also be disabled because our current packing method is not
1294 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1295 * guaranteed to match across stages.
1296 */
1297 const bool disable_varying_packing;
1298
1299 /**
1300 * If true, this driver has transform feedback enabled. The transform
1301 * feedback code requires at least some packing to be done even when
1302 * varying packing is disabled; fortunately, where transform feedback
1303 * requires packing it is safe to override the disabled setting. See
1304 * is_varying_packing_safe().
1305 */
1306 const bool xfb_enabled;
1307
1308 const bool enhanced_layouts_enabled;
1309
1310 /**
1311 * Enum representing the order in which varyings are packed within a
1312 * packing class.
1313 *
1314 * Currently we pack vec4's first, then vec2's, then scalar values, then
1315 * vec3's. This order ensures that the only vectors that are at risk of
1316 * having to be "double parked" (split between two adjacent varying slots)
1317 * are the vec3's.
1318 */
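/* As a hypothetical illustration, packing a vec4, a vec2, a float and a
 * vec3 that all share one packing class fills components in this order:
 *
 *    slot N   : vec4.xyzw
 *    slot N+1 : vec2.xy, float, vec3.x
 *    slot N+2 : vec3.yz
 *
 * i.e. only the vec3 ends up "double parked" across a slot boundary.
 */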
1319 enum packing_order_enum {
1320 PACKING_ORDER_VEC4,
1321 PACKING_ORDER_VEC2,
1322 PACKING_ORDER_SCALAR,
1323 PACKING_ORDER_VEC3,
1324 };
1325
1326 static unsigned compute_packing_class(const ir_variable *var);
1327 static packing_order_enum compute_packing_order(const ir_variable *var);
1328 static int match_comparator(const void *x_generic, const void *y_generic);
1329 static int xfb_comparator(const void *x_generic, const void *y_generic);
1330
1331 /**
1332 * Structure recording the relationship between a single producer output
1333 * and a single consumer input.
1334 */
1335 struct match {
1336 /**
1337 * Packing class for this varying, computed by compute_packing_class().
1338 */
1339 unsigned packing_class;
1340
1341 /**
1342 * Packing order for this varying, computed by compute_packing_order().
1343 */
1344 packing_order_enum packing_order;
1345 unsigned num_components;
1346
1347 /**
1348 * The output variable in the producer stage.
1349 */
1350 ir_variable *producer_var;
1351
1352 /**
1353 * The input variable in the consumer stage.
1354 */
1355 ir_variable *consumer_var;
1356
1357 /**
1358 * The location which has been assigned for this varying. This is
1359 * expressed in multiples of a float, with the first generic varying
1360 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1361 * value 0.
1362 */
1363 unsigned generic_location;
1364 } *matches;
1365
1366 /**
1367 * The number of elements in the \c matches array that are currently in
1368 * use.
1369 */
1370 unsigned num_matches;
1371
1372 /**
1373 * The number of elements that were set aside for the \c matches array when
1374 * it was allocated.
1375 */
1376 unsigned matches_capacity;
1377
1378 gl_shader_stage producer_stage;
1379 gl_shader_stage consumer_stage;
1380 };
1381
1382 } /* anonymous namespace */
1383
1384 varying_matches::varying_matches(bool disable_varying_packing,
1385 bool xfb_enabled,
1386 bool enhanced_layouts_enabled,
1387 gl_shader_stage producer_stage,
1388 gl_shader_stage consumer_stage)
1389 : disable_varying_packing(disable_varying_packing),
1390 xfb_enabled(xfb_enabled),
1391 enhanced_layouts_enabled(enhanced_layouts_enabled),
1392 producer_stage(producer_stage),
1393 consumer_stage(consumer_stage)
1394 {
1395 /* Note: this initial capacity is rather arbitrarily chosen to be large
1396 * enough for many cases without wasting an unreasonable amount of space.
1397 * varying_matches::record() will resize the array if there are more than
1398 * this number of varyings.
1399 */
1400 this->matches_capacity = 8;
1401 this->matches = (match *)
1402 malloc(sizeof(*this->matches) * this->matches_capacity);
1403 this->num_matches = 0;
1404 }
1405
1406
1407 varying_matches::~varying_matches()
1408 {
1409 free(this->matches);
1410 }
1411
1412
1413 /**
1414 * Packing is always safe on individual arrays, structures, and matrices. It
1415 * is also safe if the varying is only used for transform feedback.
1416 */
1417 bool
1418 varying_matches::is_varying_packing_safe(const glsl_type *type,
1419 const ir_variable *var)
1420 {
1421 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1422 consumer_stage == MESA_SHADER_TESS_CTRL ||
1423 producer_stage == MESA_SHADER_TESS_CTRL)
1424 return false;
1425
1426 return xfb_enabled && (type->is_array() || type->is_record() ||
1427 type->is_matrix() || var->data.is_xfb_only);
1428 }
1429
1430
1431 /**
1432 * Record the given producer/consumer variable pair in the list of variables
1433 * that should later be assigned locations.
1434 *
1435 * It is permissible for \c consumer_var to be NULL (this happens if a
1436 * variable is output by the producer and consumed by transform feedback, but
1437 * not consumed by the consumer).
1438 *
1439 * If \c producer_var has already been paired up with a consumer_var, or
1440 * producer_var is part of fixed pipeline functionality (and hence already has
1441 * a location assigned), this function has no effect.
1442 *
1443 * Note: as a side effect this function may change the interpolation type of
1444 * \c producer_var, but only when the change couldn't possibly affect
1445 * rendering.
1446 */
1447 void
1448 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1449 {
1450 assert(producer_var != NULL || consumer_var != NULL);
1451
1452 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1453 producer_var->data.explicit_location)) ||
1454 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1455 consumer_var->data.explicit_location))) {
1456 /* Either a location already exists for this variable (since it is part
1457 * of fixed functionality), or it has already been recorded as part of a
1458 * previous match.
1459 */
1460 return;
1461 }
1462
1463 bool needs_flat_qualifier = consumer_var == NULL &&
1464 (producer_var->type->contains_integer() ||
1465 producer_var->type->contains_double());
1466
1467 if (!disable_varying_packing &&
1468 (needs_flat_qualifier ||
1469 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1470 /* Since this varying is not being consumed by the fragment shader, its
1471 * interpolation type cannot possibly affect rendering.
1472 * Also, this variable is non-flat and is (or contains) an integer
1473 * or a double.
1474 * If the consumer stage is unknown, don't modify the interpolation
1475 * type as it could affect rendering later with separate shaders.
1476 *
1477 * lower_packed_varyings requires all integer varyings to be flat,
1478 * regardless of where they appear. We can trivially satisfy that
1479 * requirement by changing the interpolation type to flat here.
1480 */
1481 if (producer_var) {
1482 producer_var->data.centroid = false;
1483 producer_var->data.sample = false;
1484 producer_var->data.interpolation = INTERP_MODE_FLAT;
1485 }
1486
1487 if (consumer_var) {
1488 consumer_var->data.centroid = false;
1489 consumer_var->data.sample = false;
1490 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1491 }
1492 }
1493
1494 if (this->num_matches == this->matches_capacity) {
1495 this->matches_capacity *= 2;
1496 this->matches = (match *)
1497 realloc(this->matches,
1498 sizeof(*this->matches) * this->matches_capacity);
1499 }
1500
1501 /* We must use the consumer to compute the packing class because in GL4.4+
1502 * there is no guarantee interpolation qualifiers will match across stages.
1503 *
1504 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1505 *
1506 * "The type and presence of interpolation qualifiers of variables with
1507 * the same name declared in all linked shaders for the same cross-stage
1508 * interface must match, otherwise the link command will fail.
1509 *
1510 * When comparing an output from one stage to an input of a subsequent
1511 * stage, the input and output don't match if their interpolation
1512 * qualifiers (or lack thereof) are not the same."
1513 *
1514 * This text was also in at least revision 7 of the 4.40 spec but is no
1515 * longer in revision 9 and not in the 4.50 spec.
1516 */
1517 const ir_variable *const var = (consumer_var != NULL)
1518 ? consumer_var : producer_var;
1519 const gl_shader_stage stage = (consumer_var != NULL)
1520 ? consumer_stage : producer_stage;
1521 const glsl_type *type = get_varying_type(var, stage);
1522
1523 if (producer_var && consumer_var &&
1524 consumer_var->data.must_be_shader_input) {
1525 producer_var->data.must_be_shader_input = 1;
1526 }
1527
1528 this->matches[this->num_matches].packing_class
1529 = this->compute_packing_class(var);
1530 this->matches[this->num_matches].packing_order
1531 = this->compute_packing_order(var);
1532 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1533 var->data.must_be_shader_input) {
1534 unsigned slots = type->count_attribute_slots(false);
1535 this->matches[this->num_matches].num_components = slots * 4;
1536 } else {
1537 this->matches[this->num_matches].num_components
1538 = type->component_slots();
1539 }
1540
1541 this->matches[this->num_matches].producer_var = producer_var;
1542 this->matches[this->num_matches].consumer_var = consumer_var;
1543 this->num_matches++;
1544 if (producer_var)
1545 producer_var->data.is_unmatched_generic_inout = 0;
1546 if (consumer_var)
1547 consumer_var->data.is_unmatched_generic_inout = 0;
1548 }
1549
1550
1551 /**
1552 * Choose locations for all of the variable matches that were previously
1553 * passed to varying_matches::record().
1554 */
1555 unsigned
1556 varying_matches::assign_locations(struct gl_shader_program *prog,
1557 uint8_t *components,
1558 uint64_t reserved_slots)
1559 {
1560 /* If packing has been disabled then we cannot safely sort the varyings by
1561 * class, as it may mean we are using a version of OpenGL where
1562 * interpolation qualifiers are not guaranteed to match across shaders;
1563 * sorting in this case could result in mismatching shader
1564 * interfaces.
1565 * When packing is disabled the sort orders varyings used by transform
1566 * feedback first, but it also depends on *undefined behaviour* of qsort to
1567 * reverse the order of the varyings. See: xfb_comparator().
1568 */
1569 if (!this->disable_varying_packing) {
1570 /* Sort varying matches into an order that makes them easy to pack. */
1571 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1572 &varying_matches::match_comparator);
1573 } else {
1574 /* Only sort varyings that are only used by transform feedback. */
1575 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1576 &varying_matches::xfb_comparator);
1577 }
1578
1579 unsigned generic_location = 0;
1580 unsigned generic_patch_location = MAX_VARYING*4;
1581 bool previous_var_xfb_only = false;
1582
1583 for (unsigned i = 0; i < this->num_matches; i++) {
1584 unsigned *location = &generic_location;
1585
1586 const ir_variable *var;
1587 const glsl_type *type;
1588 bool is_vertex_input = false;
1589 if (matches[i].consumer_var) {
1590 var = matches[i].consumer_var;
1591 type = get_varying_type(var, consumer_stage);
1592 if (consumer_stage == MESA_SHADER_VERTEX)
1593 is_vertex_input = true;
1594 } else {
1595 var = matches[i].producer_var;
1596 type = get_varying_type(var, producer_stage);
1597 }
1598
1599 if (var->data.patch)
1600 location = &generic_patch_location;
1601
1602 /* Advance to the next slot if this varying has a different packing
1603 * class than the previous one, and we're not already on a slot
1604 * boundary.
1605 *
1606 * Also advance to the next slot if packing is disabled. This makes sure
1607 * we don't assign varyings the same locations which is possible
1608 * because we still pack individual arrays, records and matrices even
1609 * when packing is disabled. Note we don't advance to the next slot if
1610 * we can pack varyings together that are only used for transform
1611 * feedback.
1612 */
1613 if (var->data.must_be_shader_input ||
1614 (this->disable_varying_packing &&
1615 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1616 (i > 0 && this->matches[i - 1].packing_class
1617 != this->matches[i].packing_class )) {
1618 *location = ALIGN(*location, 4);
1619 }
1620
1621 previous_var_xfb_only = var->data.is_xfb_only;
1622
1623 /* The number of components taken up by this variable. For vertex shader
1624 * inputs, we use the number of slots * 4, as they have different
1625 * counting rules.
1626 */
1627 unsigned num_components = is_vertex_input ?
1628 type->count_attribute_slots(is_vertex_input) * 4 :
1629 this->matches[i].num_components;
1630
1631 /* The last slot for this variable, inclusive. */
1632 unsigned slot_end = *location + num_components - 1;
1633
1634 /* FIXME: We could be smarter in the below code and loop back over
1635 * trying to fill any locations that we skipped because we couldn't pack
1636 * the varying between an explicit location. For now just let the user
1637 * hit the linking error if we run out of room and suggest they use
1638 * explicit locations.
1639 */
1640 while (slot_end < MAX_VARYING * 4u) {
1641 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1642 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1643
1644 assert(slots > 0);
1645 if (reserved_slots & slot_mask) {
1646 *location = ALIGN(*location + 1, 4);
1647 slot_end = *location + num_components - 1;
1648 continue;
1649 }
1650
1651 break;
1652 }
1653
1654 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1655 linker_error(prog, "insufficient contiguous locations available for "
1656 "%s it is possible an array or struct could not be "
1657 "packed between varyings with explicit locations. Try "
1658 "using an explicit location for arrays and structs.",
1659 var->name);
1660 }
1661
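/* Record how many components each vec4 slot holds: fully covered slots get
 * 4 and the final slot gets only the components actually used. This array
 * is later handed to lower_packed_varyings() by assign_varying_locations().
 */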
1662 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1663 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1664 components[j] = 4;
1665 components[slot_end / 4u] = (slot_end & 3) + 1;
1666 }
1667
1668 this->matches[i].generic_location = *location;
1669
1670 *location = slot_end + 1;
1671 }
1672
1673 return (generic_location + 3) / 4;
1674 }
1675
1676
1677 /**
1678 * Update the producer and consumer shaders to reflect the location
1679 * assignments that were made by varying_matches::assign_locations().
1680 */
1681 void
1682 varying_matches::store_locations() const
1683 {
1684 /* Check if a location needs to be packed with lower_packed_varyings() or
1685 * if we can just use ARB_enhanced_layouts packing.
1686 */
1687 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1688 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
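/* pack_loc[i] is set when the varyings at location i must go through
 * lower_packed_varyings(); loc_type[i][c] records the type seen at
 * component c of location i so we can later check that co-located varyings
 * share a base type.
 */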
1689
1690 for (unsigned i = 0; i < this->num_matches; i++) {
1691 ir_variable *producer_var = this->matches[i].producer_var;
1692 ir_variable *consumer_var = this->matches[i].consumer_var;
1693 unsigned generic_location = this->matches[i].generic_location;
1694 unsigned slot = generic_location / 4;
1695 unsigned offset = generic_location % 4;
1696
1697 if (producer_var) {
1698 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1699 producer_var->data.location_frac = offset;
1700 }
1701
1702 if (consumer_var) {
1703 assert(consumer_var->data.location == -1);
1704 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1705 consumer_var->data.location_frac = offset;
1706 }
1707
1708 /* Find locations suitable for native packing via
1709 * ARB_enhanced_layouts.
1710 */
1711 if (producer_var && consumer_var) {
1712 if (enhanced_layouts_enabled) {
1713 const glsl_type *type =
1714 get_varying_type(producer_var, producer_stage);
1715 if (type->is_array() || type->is_matrix() || type->is_record() ||
1716 type->is_double()) {
1717 unsigned comp_slots = type->component_slots() + offset;
1718 unsigned slots = comp_slots / 4;
1719 if (comp_slots % 4)
1720 slots += 1;
1721
1722 for (unsigned j = 0; j < slots; j++) {
1723 pack_loc[slot + j] = true;
1724 }
1725 } else if (offset + type->vector_elements > 4) {
1726 pack_loc[slot] = true;
1727 pack_loc[slot + 1] = true;
1728 } else {
1729 loc_type[slot][offset] = type;
1730 }
1731 }
1732 }
1733 }
1734
1735 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1736 * suitable.
1737 */
1738 if (enhanced_layouts_enabled) {
1739 for (unsigned i = 0; i < this->num_matches; i++) {
1740 ir_variable *producer_var = this->matches[i].producer_var;
1741 ir_variable *consumer_var = this->matches[i].consumer_var;
1742 unsigned generic_location = this->matches[i].generic_location;
1743 unsigned slot = generic_location / 4;
1744
1745 if (pack_loc[slot] || !producer_var || !consumer_var)
1746 continue;
1747
1748 const glsl_type *type =
1749 get_varying_type(producer_var, producer_stage);
1750 bool type_match = true;
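/* Varyings that share a location may only use ARB_enhanced_layouts style
 * packing when their base types match, so compare against every component
 * type already recorded for this location.
 */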
1751 for (unsigned j = 0; j < 4; j++) {
1752 if (loc_type[slot][j]) {
1753 if (type->base_type != loc_type[slot][j]->base_type)
1754 type_match = false;
1755 }
1756 }
1757
1758 if (type_match) {
1759 producer_var->data.explicit_location = 1;
1760 consumer_var->data.explicit_location = 1;
1761 producer_var->data.explicit_component = 1;
1762 consumer_var->data.explicit_component = 1;
1763 }
1764 }
1765 }
1766 }
1767
1768
1769 /**
1770 * Compute the "packing class" of the given varying. This is an unsigned
1771 * integer with the property that two variables in the same packing class can
1772 * be safely backed into the same vec4.
1773 */
1774 unsigned
1775 varying_matches::compute_packing_class(const ir_variable *var)
1776 {
1777 /* Without help from the back-end, there is no way to pack together
1778 * variables with different interpolation types, because
1779 * lower_packed_varyings must choose exactly one interpolation type for
1780 * each packed varying it creates.
1781 *
1782 * However, we can safely pack together floats, ints, and uints, because:
1783 *
1784 * - varyings of base type "int" and "uint" must use the "flat"
1785 * interpolation type, which can only occur in GLSL 1.30 and above.
1786 *
1787 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1788 * can store flat floats as ints without losing any information (using
1789 * the ir_unop_bitcast_* opcodes).
1790 *
1791 * Therefore, the packing class depends only on the interpolation type.
1792 */
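/* Pack the centroid/sample/patch/must_be_shader_input qualifiers into the
 * low bits, then scale by 8 and add the interpolation mode, so every
 * distinct combination maps to a distinct class. For example, a
 * sample-qualified flat varying gets class (0x2 * 8) + INTERP_MODE_FLAT
 * (illustrative, derived from the expression below).
 */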
1793 unsigned packing_class = var->data.centroid | (var->data.sample << 1) |
1794 (var->data.patch << 2) |
1795 (var->data.must_be_shader_input << 3);
1796 packing_class *= 8;
1797 packing_class += var->is_interpolation_flat()
1798 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
1799 return packing_class;
1800 }
1801
1802
1803 /**
1804 * Compute the "packing order" of the given varying. This is a sort key we
1805 * use to determine when to attempt to pack the given varying relative to
1806 * other varyings in the same packing class.
1807 */
1808 varying_matches::packing_order_enum
1809 varying_matches::compute_packing_order(const ir_variable *var)
1810 {
1811 const glsl_type *element_type = var->type;
1812
1813 while (element_type->is_array()) {
1814 element_type = element_type->fields.array;
1815 }
1816
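/* The sort key is the number of components in the type's last, possibly
 * partial, vec4 (component_slots() % 4). For example a mat3 occupies nine
 * components, so 9 % 4 == 1 and it sorts alongside the scalars.
 */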
1817 switch (element_type->component_slots() % 4) {
1818 case 1: return PACKING_ORDER_SCALAR;
1819 case 2: return PACKING_ORDER_VEC2;
1820 case 3: return PACKING_ORDER_VEC3;
1821 case 0: return PACKING_ORDER_VEC4;
1822 default:
1823 assert(!"Unexpected value of component_slots() % 4");
1824 return PACKING_ORDER_VEC4;
1825 }
1826 }
1827
1828
1829 /**
1830 * Comparison function passed to qsort() to sort varyings by packing_class and
1831 * then by packing_order.
1832 */
1833 int
1834 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
1835 {
1836 const match *x = (const match *) x_generic;
1837 const match *y = (const match *) y_generic;
1838
1839 if (x->packing_class != y->packing_class)
1840 return x->packing_class - y->packing_class;
1841 return x->packing_order - y->packing_order;
1842 }
1843
1844
1845 /**
1846 * Comparison function passed to qsort() to sort varyings used only by
1847 * transform feedback when packing of other varyings is disabled.
1848 */
1849 int
1850 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
1851 {
1852 const match *x = (const match *) x_generic;
1853
1854 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
1855 return match_comparator(x_generic, y_generic);
1856
1857 /* FIXME: When the comparator returns 0 it means the elements being
1858 * compared are equivalent. However the qsort documentation says:
1859 *
1860 * "The order of equivalent elements is undefined."
1861 *
1862 * In practice the sort ends up reversing the order of the varyings, which
1863 * means locations are also assigned in this reversed order, and that
1864 * happens to be what we want. This is also what happens in
1865 * varying_matches::match_comparator().
1866 */
1867 return 0;
1868 }
1869
1870
1871 /**
1872 * Is the given variable a varying variable to be counted against the
1873 * limit in ctx->Const.MaxVarying?
1874 * This includes variables such as texcoords, colors and generic
1875 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
1876 */
1877 static bool
1878 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
1879 {
1880 /* Only fragment shaders will take a varying variable as an input */
1881 if (stage == MESA_SHADER_FRAGMENT &&
1882 var->data.mode == ir_var_shader_in) {
1883 switch (var->data.location) {
1884 case VARYING_SLOT_POS:
1885 case VARYING_SLOT_FACE:
1886 case VARYING_SLOT_PNTC:
1887 return false;
1888 default:
1889 return true;
1890 }
1891 }
1892 return false;
1893 }
1894
1895
1896 /**
1897 * Visitor class that generates tfeedback_candidate structs describing all
1898 * possible targets of transform feedback.
1899 *
1900 * tfeedback_candidate structs are stored in the hash table
1901 * tfeedback_candidates, which is passed to the constructor. This hash table
1902 * maps varying names to instances of the tfeedback_candidate struct.
1903 */
1904 class tfeedback_candidate_generator : public program_resource_visitor
1905 {
1906 public:
1907 tfeedback_candidate_generator(void *mem_ctx,
1908 hash_table *tfeedback_candidates)
1909 : mem_ctx(mem_ctx),
1910 tfeedback_candidates(tfeedback_candidates),
1911 toplevel_var(NULL),
1912 varying_floats(0)
1913 {
1914 }
1915
1916 void process(ir_variable *var)
1917 {
1918 /* All named varying interface blocks should be flattened by now */
1919 assert(!var->is_interface_instance());
1920
1921 this->toplevel_var = var;
1922 this->varying_floats = 0;
1923 program_resource_visitor::process(var, false);
1924 }
1925
1926 private:
1927 virtual void visit_field(const glsl_type *type, const char *name,
1928 bool /* row_major */,
1929 const glsl_type * /* record_type */,
1930 const enum glsl_interface_packing,
1931 bool /* last_field */)
1932 {
1933 assert(!type->without_array()->is_record());
1934 assert(!type->without_array()->is_interface());
1935
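/* Each leaf field becomes one transform feedback candidate. The key is the
 * fully qualified name built by program_resource_visitor (for example
 * "foo.bar", illustrative) and the offset is counted in components from
 * the start of the toplevel variable.
 */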
1936 tfeedback_candidate *candidate
1937 = rzalloc(this->mem_ctx, tfeedback_candidate);
1938 candidate->toplevel_var = this->toplevel_var;
1939 candidate->type = type;
1940 candidate->offset = this->varying_floats;
1941 _mesa_hash_table_insert(this->tfeedback_candidates,
1942 ralloc_strdup(this->mem_ctx, name),
1943 candidate);
1944 this->varying_floats += type->component_slots();
1945 }
1946
1947 /**
1948 * Memory context used to allocate hash table keys and values.
1949 */
1950 void * const mem_ctx;
1951
1952 /**
1953 * Hash table in which tfeedback_candidate objects should be stored.
1954 */
1955 hash_table * const tfeedback_candidates;
1956
1957 /**
1958 * Pointer to the toplevel variable that is being traversed.
1959 */
1960 ir_variable *toplevel_var;
1961
1962 /**
1963 * Total number of varying floats that have been visited so far. This is
1964 * used to determine the offset to each varying within the toplevel
1965 * variable.
1966 */
1967 unsigned varying_floats;
1968 };
1969
1970
1971 namespace linker {
1972
1973 void
1974 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
1975 hash_table *consumer_inputs,
1976 hash_table *consumer_interface_inputs,
1977 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1978 {
1979 memset(consumer_inputs_with_locations,
1980 0,
1981 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
1982
1983 foreach_in_list(ir_instruction, node, ir) {
1984 ir_variable *const input_var = node->as_variable();
1985
1986 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
1987 /* All interface blocks should have been lowered by this point */
1988 assert(!input_var->type->is_interface());
1989
1990 if (input_var->data.explicit_location) {
1991 /* assign_varying_locations only cares about finding the
1992 * ir_variable at the start of a contiguous location block.
1993 *
1994 * - For !producer, consumer_inputs_with_locations isn't used.
1995 *
1996 * - For !consumer, consumer_inputs_with_locations is empty.
1997 *
1998 * For consumer && producer, if you were trying to set some
1999 * ir_variable to the middle of a location block on the other side
2000 * of producer/consumer, cross_validate_outputs_to_inputs() should
2001 * be link-erroring due to either type mismatch or location
2002 * overlaps. If the variables do match up, then they've got a
2003 * matching data.location and you only looked at
2004 * consumer_inputs_with_locations[var->data.location], not any
2005 * following entries for the array/structure.
2006 */
2007 consumer_inputs_with_locations[input_var->data.location] =
2008 input_var;
2009 } else if (input_var->get_interface_type() != NULL) {
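/* Inputs that are members of an interface block are keyed by
 * "BlockTypeName.member"; e.g. a member "color" of a block type named
 * "Data" is stored as "Data.color" (illustrative names).
 */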
2010 char *const iface_field_name =
2011 ralloc_asprintf(mem_ctx, "%s.%s",
2012 input_var->get_interface_type()->without_array()->name,
2013 input_var->name);
2014 _mesa_hash_table_insert(consumer_interface_inputs,
2015 iface_field_name, input_var);
2016 } else {
2017 _mesa_hash_table_insert(consumer_inputs,
2018 ralloc_strdup(mem_ctx, input_var->name),
2019 input_var);
2020 }
2021 }
2022 }
2023 }
2024
2025 /**
2026 * Find a variable from the consumer that "matches" the specified variable
2027 *
2028 * This function only finds inputs with names that match. There is no
2029 * validation (here) that the types, etc. are compatible.
2030 */
2031 ir_variable *
2032 get_matching_input(void *mem_ctx,
2033 const ir_variable *output_var,
2034 hash_table *consumer_inputs,
2035 hash_table *consumer_interface_inputs,
2036 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2037 {
2038 ir_variable *input_var;
2039
2040 if (output_var->data.explicit_location) {
2041 input_var = consumer_inputs_with_locations[output_var->data.location];
2042 } else if (output_var->get_interface_type() != NULL) {
2043 char *const iface_field_name =
2044 ralloc_asprintf(mem_ctx, "%s.%s",
2045 output_var->get_interface_type()->without_array()->name,
2046 output_var->name);
2047 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2048 input_var = entry ? (ir_variable *) entry->data : NULL;
2049 } else {
2050 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2051 input_var = entry ? (ir_variable *) entry->data : NULL;
2052 }
2053
2054 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2055 ? NULL : input_var;
2056 }
2057
2058 }
2059
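/**
 * qsort comparator used by canonicalize_shader_io().
 *
 * The ordering is intentionally reversed (explicit locations descending,
 * other names in reverse strcmp order, explicit-location variables last)
 * because the caller pushes the sorted variables onto the head of the IR
 * list one at a time, which flips the order back to the canonical one.
 */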
2060 static int
2061 io_variable_cmp(const void *_a, const void *_b)
2062 {
2063 const ir_variable *const a = *(const ir_variable **) _a;
2064 const ir_variable *const b = *(const ir_variable **) _b;
2065
2066 if (a->data.explicit_location && b->data.explicit_location)
2067 return b->data.location - a->data.location;
2068
2069 if (a->data.explicit_location && !b->data.explicit_location)
2070 return 1;
2071
2072 if (!a->data.explicit_location && b->data.explicit_location)
2073 return -1;
2074
2075 return -strcmp(a->name, b->name);
2076 }
2077
2078 /**
2079 * Sort the shader IO variables into canonical order
2080 */
2081 static void
2082 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2083 {
2084 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2085 unsigned num_variables = 0;
2086
2087 foreach_in_list(ir_instruction, node, ir) {
2088 ir_variable *const var = node->as_variable();
2089
2090 if (var == NULL || var->data.mode != io_mode)
2091 continue;
2092
2093 /* If we have already encountered more I/O variables than could
2094 * successfully link, bail.
2095 */
2096 if (num_variables == ARRAY_SIZE(var_table))
2097 return;
2098
2099 var_table[num_variables++] = var;
2100 }
2101
2102 if (num_variables == 0)
2103 return;
2104
2105 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2106 * we're going to push the variables on to the IR list as a stack, so we
2107 * want the last variable (in canonical order) to be first in the list.
2108 */
2109 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2110
2111 /* Remove the variable from its current location in the IR, and put it at
2112 * the front.
2113 */
2114 for (unsigned i = 0; i < num_variables; i++) {
2115 var_table[i]->remove();
2116 ir->push_head(var_table[i]);
2117 }
2118 }
2119
2120 /**
2121 * Generate a bitfield map of the explicit locations for shader varyings.
2122 *
2123 * Note: For tessellation shaders we are sitting right on the limit of the
2124 * 64-bit map. Per-vertex and per-patch both have separate location domains
2125 * with a max of MAX_VARYING.
2126 */
2127 static uint64_t
2128 reserved_varying_slot(struct gl_linked_shader *stage,
2129 ir_variable_mode io_mode)
2130 {
2131 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2132 /* Avoid an overflow of the returned value */
2133 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2134
2135 uint64_t slots = 0;
2136 int var_slot;
2137
2138 if (!stage)
2139 return slots;
2140
2141 foreach_in_list(ir_instruction, node, stage->ir) {
2142 ir_variable *const var = node->as_variable();
2143
2144 if (var == NULL || var->data.mode != io_mode ||
2145 !var->data.explicit_location ||
2146 var->data.location < VARYING_SLOT_VAR0)
2147 continue;
2148
2149 var_slot = var->data.location - VARYING_SLOT_VAR0;
2150
2151 unsigned num_elements = get_varying_type(var, stage->Stage)
2152 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2153 stage->Stage == MESA_SHADER_VERTEX);
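/* Reserve one bit per slot the variable covers, so arrays, structs and
 * matrices with an explicit location reserve a contiguous range of slots.
 */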
2154 for (unsigned i = 0; i < num_elements; i++) {
2155 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2156 slots |= UINT64_C(1) << var_slot;
2157 var_slot += 1;
2158 }
2159 }
2160
2161 return slots;
2162 }
2163
2164
2165 /**
2166 * Assign locations for all variables that are produced in one pipeline stage
2167 * (the "producer") and consumed in the next stage (the "consumer").
2168 *
2169 * Variables produced by the producer may also be consumed by transform
2170 * feedback.
2171 *
2172 * \param num_tfeedback_decls is the number of declarations indicating
2173 * variables that may be consumed by transform feedback.
2174 *
2175 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2176 * representing the result of parsing the strings passed to
2177 * glTransformFeedbackVaryings(). assign_location() will be called for
2178 * each of these objects that matches one of the outputs of the
2179 * producer.
2180 *
2181 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2182 * be NULL. In this case, varying locations are assigned solely based on the
2183 * requirements of transform feedback.
2184 */
2185 static bool
2186 assign_varying_locations(struct gl_context *ctx,
2187 void *mem_ctx,
2188 struct gl_shader_program *prog,
2189 gl_linked_shader *producer,
2190 gl_linked_shader *consumer,
2191 unsigned num_tfeedback_decls,
2192 tfeedback_decl *tfeedback_decls,
2193 const uint64_t reserved_slots)
2194 {
2195 /* Tessellation shaders treat inputs and outputs as shared memory and can
2196 * access inputs and outputs of other invocations.
2197 * Therefore, they can't be lowered to temps easily (and definitely not
2198 * efficiently).
2199 */
2200 bool unpackable_tess =
2201 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2202 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2203 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2204
2205 /* Transform feedback code assumes varying arrays are packed, so if the
2206 * driver has disabled varying packing, make sure to at least enable
2207 * packing required by transform feedback.
2208 */
2209 bool xfb_enabled =
2210 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2211
2212 /* Disable packing on outward facing interfaces for SSO because in ES we
2213 * need to retain the unpacked varying information for draw time
2214 * validation.
2215 *
2216 * Packing is still enabled on individual arrays, structs, and matrices as
2217 * these are required by the transform feedback code and it is still safe
2218 * to do so. We also enable packing when a varying is only used for
2219 * transform feedback and it's not an SSO.
2220 */
2221 bool disable_varying_packing =
2222 ctx->Const.DisableVaryingPacking || unpackable_tess;
2223 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2224 disable_varying_packing = true;
2225
2226 varying_matches matches(disable_varying_packing, xfb_enabled,
2227 ctx->Extensions.ARB_enhanced_layouts,
2228 producer ? producer->Stage : MESA_SHADER_NONE,
2229 consumer ? consumer->Stage : MESA_SHADER_NONE);
2230 hash_table *tfeedback_candidates =
2231 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2232 _mesa_key_string_equal);
2233 hash_table *consumer_inputs =
2234 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2235 _mesa_key_string_equal);
2236 hash_table *consumer_interface_inputs =
2237 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2238 _mesa_key_string_equal);
2239 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2240 NULL,
2241 };
2242
2243 unsigned consumer_vertices = 0;
2244 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2245 consumer_vertices = prog->Geom.VerticesIn;
2246
2247 /* Operate in a total of four passes.
2248 *
2249 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2250 * that inputs / outputs of separable shaders will be assigned
2251 * predictable locations regardless of the order in which declarations
2252 * appeared in the shader source.
2253 *
2254 * 2. Assign locations for any matching inputs and outputs.
2255 *
2256 * 3. Mark output variables in the producer that do not have locations as
2257 * not being outputs. This lets the optimizer eliminate them.
2258 *
2259 * 4. Mark input variables in the consumer that do not have locations as
2260 * not being inputs. This lets the optimizer eliminate them.
2261 */
2262 if (consumer)
2263 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2264
2265 if (producer)
2266 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2267
2268 if (consumer)
2269 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2270 consumer_inputs,
2271 consumer_interface_inputs,
2272 consumer_inputs_with_locations);
2273
2274 if (producer) {
2275 foreach_in_list(ir_instruction, node, producer->ir) {
2276 ir_variable *const output_var = node->as_variable();
2277
2278 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2279 continue;
2280
2281 /* Only geometry shaders can use non-zero streams */
2282 assert(output_var->data.stream == 0 ||
2283 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2284 producer->Stage == MESA_SHADER_GEOMETRY));
2285
2286 if (num_tfeedback_decls > 0) {
2287 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2288 g.process(output_var);
2289 }
2290
2291 ir_variable *const input_var =
2292 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2293 consumer_interface_inputs,
2294 consumer_inputs_with_locations);
2295
2296 /* If a matching input variable was found, add this output (and the
2297 * input) to the set. If this is a separable program and there is no
2298 * consumer stage, add the output.
2299 *
2300 * Always add TCS outputs. They are shared by all invocations
2301 * within a patch and can be used as shared memory.
2302 */
2303 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2304 producer->Stage == MESA_SHADER_TESS_CTRL) {
2305 matches.record(output_var, input_var);
2306 }
2307
2308 /* Only stream 0 outputs can be consumed in the next stage */
2309 if (input_var && output_var->data.stream != 0) {
2310 linker_error(prog, "output %s is assigned to stream=%d but "
2311 "is linked to an input, which requires stream=0",
2312 output_var->name, output_var->data.stream);
2313 return false;
2314 }
2315 }
2316 } else {
2317 /* If there's no producer stage, then this must be a separable program.
2318 * For example, we may have a program that has just a fragment shader.
2319 * Later this program will be used with some arbitrary vertex (or
2320 * geometry) shader program. This means that locations must be assigned
2321 * for all the inputs.
2322 */
2323 foreach_in_list(ir_instruction, node, consumer->ir) {
2324 ir_variable *const input_var = node->as_variable();
2325
2326 if (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2327 continue;
2328
2329 matches.record(NULL, input_var);
2330 }
2331 }
2332
2333 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2334 if (!tfeedback_decls[i].is_varying())
2335 continue;
2336
2337 const tfeedback_candidate *matched_candidate
2338 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2339
2340 if (matched_candidate == NULL) {
2341 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2342 return false;
2343 }
2344
2345 /* Mark xfb varyings as always active */
2346 matched_candidate->toplevel_var->data.always_active_io = 1;
2347
2348 /* Mark any corresponding inputs as always active also. We must do this
2349 * because we have a NIR pass that lowers vectors to scalars and another
2350 * that removes unused varyings.
2351 * We don't split varyings marked as always active because there is no
2352 * point in doing so. This means we need to mark both sides of the
2353 * interface as always active otherwise we will have a mismatch and
2354 * start removing things we shouldn't.
2355 */
2356 ir_variable *const input_var =
2357 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2358 consumer_inputs,
2359 consumer_interface_inputs,
2360 consumer_inputs_with_locations);
2361 if (input_var)
2362 input_var->data.always_active_io = 1;
2363
2364 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2365 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2366 matches.record(matched_candidate->toplevel_var, NULL);
2367 }
2368 }
2369
2370 _mesa_hash_table_destroy(consumer_inputs, NULL);
2371 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2372
2373 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2374 const unsigned slots_used = matches.assign_locations(
2375 prog, components, reserved_slots);
2376 matches.store_locations();
2377
2378 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2379 if (!tfeedback_decls[i].is_varying())
2380 continue;
2381
2382 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2383 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2384 return false;
2385 }
2386 }
2387 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2388
2389 if (consumer && producer) {
2390 foreach_in_list(ir_instruction, node, consumer->ir) {
2391 ir_variable *const var = node->as_variable();
2392
2393 if (var && var->data.mode == ir_var_shader_in &&
2394 var->data.is_unmatched_generic_inout) {
2395 if (!prog->IsES && prog->data->Version <= 120) {
2396 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2397 *
2398 * Only those varying variables used (i.e. read) in
2399 * the fragment shader executable must be written to
2400 * by the vertex shader executable; declaring
2401 * superfluous varying variables in a vertex shader is
2402 * permissible.
2403 *
2404 * We interpret this text as meaning that the VS must
2405 * write the variable for the FS to read it. See
2406 * "glsl1-varying read but not written" in piglit.
2407 */
2408 linker_error(prog, "%s shader varying %s not written "
2409 "by %s shader\n.",
2410 _mesa_shader_stage_to_string(consumer->Stage),
2411 var->name,
2412 _mesa_shader_stage_to_string(producer->Stage));
2413 } else {
2414 linker_warning(prog, "%s shader varying %s not written "
2415 "by %s shader\n.",
2416 _mesa_shader_stage_to_string(consumer->Stage),
2417 var->name,
2418 _mesa_shader_stage_to_string(producer->Stage));
2419 }
2420 }
2421 }
2422
2423 /* Now that validation is done it's safe to remove unused varyings. As
2424 * we have both a producer and consumer it's safe to remove unused
2425 * varyings even if the program is an SSO, because the stages are being
2426 * linked together, i.e. we have a multi-stage SSO.
2427 */
2428 remove_unused_shader_inputs_and_outputs(false, producer,
2429 ir_var_shader_out);
2430 remove_unused_shader_inputs_and_outputs(false, consumer,
2431 ir_var_shader_in);
2432 }
2433
2434 if (producer) {
2435 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2436 0, producer, disable_varying_packing,
2437 xfb_enabled);
2438 }
2439
2440 if (consumer) {
2441 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2442 consumer_vertices, consumer,
2443 disable_varying_packing, xfb_enabled);
2444 }
2445
2446 return true;
2447 }
2448
2449 static bool
2450 check_against_output_limit(struct gl_context *ctx,
2451 struct gl_shader_program *prog,
2452 gl_linked_shader *producer,
2453 unsigned num_explicit_locations)
2454 {
2455 unsigned output_vectors = num_explicit_locations;
2456
2457 foreach_in_list(ir_instruction, node, producer->ir) {
2458 ir_variable *const var = node->as_variable();
2459
2460 if (var && !var->data.explicit_location &&
2461 var->data.mode == ir_var_shader_out &&
2462 var_counts_against_varying_limit(producer->Stage, var)) {
2463 /* outputs for fragment shader can't be doubles */
2464 output_vectors += var->type->count_attribute_slots(false);
2465 }
2466 }
2467
2468 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2469 unsigned max_output_components =
2470 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2471
2472 const unsigned output_components = output_vectors * 4;
2473 if (output_components > max_output_components) {
2474 if (ctx->API == API_OPENGLES2 || prog->IsES)
2475 linker_error(prog, "%s shader uses too many output vectors "
2476 "(%u > %u)\n",
2477 _mesa_shader_stage_to_string(producer->Stage),
2478 output_vectors,
2479 max_output_components / 4);
2480 else
2481 linker_error(prog, "%s shader uses too many output components "
2482 "(%u > %u)\n",
2483 _mesa_shader_stage_to_string(producer->Stage),
2484 output_components,
2485 max_output_components);
2486
2487 return false;
2488 }
2489
2490 return true;
2491 }
2492
2493 static bool
2494 check_against_input_limit(struct gl_context *ctx,
2495 struct gl_shader_program *prog,
2496 gl_linked_shader *consumer,
2497 unsigned num_explicit_locations)
2498 {
2499 unsigned input_vectors = num_explicit_locations;
2500
2501 foreach_in_list(ir_instruction, node, consumer->ir) {
2502 ir_variable *const var = node->as_variable();
2503
2504 if (var && !var->data.explicit_location &&
2505 var->data.mode == ir_var_shader_in &&
2506 var_counts_against_varying_limit(consumer->Stage, var)) {
2507 /* these are varyings, not vertex inputs, so don't use vertex input counting rules */
2508 input_vectors += var->type->count_attribute_slots(false);
2509 }
2510 }
2511
2512 assert(consumer->Stage != MESA_SHADER_VERTEX);
2513 unsigned max_input_components =
2514 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2515
2516 const unsigned input_components = input_vectors * 4;
2517 if (input_components > max_input_components) {
2518 if (ctx->API == API_OPENGLES2 || prog->IsES)
2519 linker_error(prog, "%s shader uses too many input vectors "
2520 "(%u > %u)\n",
2521 _mesa_shader_stage_to_string(consumer->Stage),
2522 input_vectors,
2523 max_input_components / 4);
2524 else
2525 linker_error(prog, "%s shader uses too many input components "
2526 "(%u > %u)\n",
2527 _mesa_shader_stage_to_string(consumer->Stage),
2528 input_components,
2529 max_input_components);
2530
2531 return false;
2532 }
2533
2534 return true;
2535 }
2536
2537 bool
2538 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2539 struct gl_context *ctx, void *mem_ctx)
2540 {
2541 bool has_xfb_qualifiers = false;
2542 unsigned num_tfeedback_decls = 0;
2543 char **varying_names = NULL;
2544 tfeedback_decl *tfeedback_decls = NULL;
2545
2546 /* From the ARB_enhanced_layouts spec:
2547 *
2548 * "If the shader used to record output variables for transform feedback
2549 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2550 * qualifiers, the values specified by TransformFeedbackVaryings are
2551 * ignored, and the set of variables captured for transform feedback is
2552 * instead derived from the specified layout qualifiers."
2553 */
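/* As an illustrative example (not from the quoted spec text), a shader
 * declaring:
 *
 *    layout(xfb_offset = 0) out vec4 pos;
 *
 * causes any names passed to glTransformFeedbackVaryings() to be ignored
 * for this program.
 */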
2554 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2555 /* Find last stage before fragment shader */
2556 if (prog->_LinkedShaders[i]) {
2557 has_xfb_qualifiers =
2558 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2559 prog, &num_tfeedback_decls,
2560 &varying_names);
2561 break;
2562 }
2563 }
2564
2565 if (!has_xfb_qualifiers) {
2566 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2567 varying_names = prog->TransformFeedback.VaryingNames;
2568 }
2569
2570 if (num_tfeedback_decls != 0) {
2571 /* From GL_EXT_transform_feedback:
2572 * A program will fail to link if:
2573 *
2574 * * the <count> specified by TransformFeedbackVaryingsEXT is
2575 * non-zero, but the program object has no vertex or geometry
2576 * shader;
2577 */
2578 if (first >= MESA_SHADER_FRAGMENT) {
2579 linker_error(prog, "Transform feedback varyings specified, but "
2580 "no vertex, tessellation, or geometry shader is "
2581 "present.\n");
2582 return false;
2583 }
2584
2585 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2586 num_tfeedback_decls);
2587 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2588 varying_names, tfeedback_decls))
2589 return false;
2590 }
2591
2592 /* If there is no fragment shader we still need to set up transform feedback.
2593 *
2594 * For SSO we also need to assign output locations. We assign them here
2595 * because we need to do it for both single-stage and multi-stage
2596 * programs.
2597 */
2598 if (last < MESA_SHADER_FRAGMENT &&
2599 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2600 const uint64_t reserved_out_slots =
2601 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2602 if (!assign_varying_locations(ctx, mem_ctx, prog,
2603 prog->_LinkedShaders[last], NULL,
2604 num_tfeedback_decls, tfeedback_decls,
2605 reserved_out_slots))
2606 return false;
2607 }
2608
2609 if (last <= MESA_SHADER_FRAGMENT) {
2610 /* Remove unused varyings from the first/last stage unless SSO */
2611 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2612 prog->_LinkedShaders[first],
2613 ir_var_shader_in);
2614 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2615 prog->_LinkedShaders[last],
2616 ir_var_shader_out);
2617
2618 /* If the program is made up of only a single stage */
2619 if (first == last) {
2620 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2621
2622 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2623 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2624 tfeedback_decls);
2625
2626 if (prog->SeparateShader) {
2627 const uint64_t reserved_slots =
2628 reserved_varying_slot(sh, ir_var_shader_in);
2629
2630 /* Assign input locations for SSO, output locations are already
2631 * assigned.
2632 */
2633 if (!assign_varying_locations(ctx, mem_ctx, prog,
2634 NULL /* producer */,
2635 sh /* consumer */,
2636 0 /* num_tfeedback_decls */,
2637 NULL /* tfeedback_decls */,
2638 reserved_slots))
2639 return false;
2640 }
2641 } else {
2642 /* Linking the stages in the opposite order (from fragment to vertex)
2643 * ensures that inter-shader outputs written to in an earlier stage
2644 * are eliminated if they are (transitively) not used in a later
2645 * stage.
2646 */
2647 int next = last;
2648 for (int i = next - 1; i >= 0; i--) {
2649 if (prog->_LinkedShaders[i] == NULL && i != 0)
2650 continue;
2651
2652 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2653 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2654
2655 const uint64_t reserved_out_slots =
2656 reserved_varying_slot(sh_i, ir_var_shader_out);
2657 const uint64_t reserved_in_slots =
2658 reserved_varying_slot(sh_next, ir_var_shader_in);
2659
2660 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2661 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2662 tfeedback_decls);
2663
2664 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2665 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2666 tfeedback_decls,
2667 reserved_out_slots | reserved_in_slots))
2668 return false;
2669
2670 /* This must be done after all dead varyings are eliminated. */
2671 if (sh_i != NULL) {
2672 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2673 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2674 return false;
2675 }
2676 }
2677
2678 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2679 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2680 return false;
2681
2682 next = i;
2683 }
2684 }
2685 }
2686
2687 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2688 has_xfb_qualifiers))
2689 return false;
2690
2691 return true;
2692 }