glsl/linker: fix location aliasing checks for interface variables
[mesa.git] src/compiler/glsl/link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/mtypes.h"
33 #include "glsl_symbol_table.h"
34 #include "glsl_parser_extras.h"
35 #include "ir_optimization.h"
36 #include "linker.h"
37 #include "link_varyings.h"
38 #include "main/macros.h"
39 #include "util/hash_table.h"
40 #include "program.h"
41
42
43 /**
44 * Get the varying type stripped of the outermost array if we're processing
45 * a stage whose varyings are arrays indexed by a vertex number (such as
46 * geometry shader inputs).
47 */
48 static const glsl_type *
49 get_varying_type(const ir_variable *var, gl_shader_stage stage)
50 {
51 const glsl_type *type = var->type;
52
53 if (!var->data.patch &&
54 ((var->data.mode == ir_var_shader_out &&
55 stage == MESA_SHADER_TESS_CTRL) ||
56 (var->data.mode == ir_var_shader_in &&
57 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
58 stage == MESA_SHADER_GEOMETRY)))) {
59 assert(type->is_array());
60 type = type->fields.array;
61 }
62
63 return type;
64 }
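/*
 * For illustration (hypothetical shader snippet): a geometry or tessellation
 * shader input such as
 *
 *     in vec4 color[];    // arrayed by vertex number
 *
 * has vec4[N] as its IR type, while the varying type returned above is the
 * per-vertex vec4.  "patch" qualified variables are not arrayed this way and
 * are returned unchanged.
 */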
65
66 static void
67 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
68 size_t name_length, unsigned *count,
69 const char *ifc_member_name,
70 const glsl_type *ifc_member_t, char ***varying_names)
71 {
72 if (t->is_interface()) {
73 size_t new_length = name_length;
74
75 assert(ifc_member_name && ifc_member_t);
76 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
77
78 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
79 NULL, NULL, varying_names);
80 } else if (t->is_record()) {
81 for (unsigned i = 0; i < t->length; i++) {
82 const char *field = t->fields.structure[i].name;
83 size_t new_length = name_length;
84
85 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
86
87 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
88 new_length, count, NULL, NULL,
89 varying_names);
90 }
91 } else if (t->without_array()->is_record() ||
92 t->without_array()->is_interface() ||
93 (t->is_array() && t->fields.array->is_array())) {
94 for (unsigned i = 0; i < t->length; i++) {
95 size_t new_length = name_length;
96
97 /* Append the subscript to the current variable name */
98 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
99
100 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
101 count, ifc_member_name, ifc_member_t,
102 varying_names);
103 }
104 } else {
105 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
106 }
107 }
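/*
 * A rough sketch of the name strings this helper builds (hypothetical types;
 * "s" is the starting name passed in):
 *
 *     struct S { vec4 a; vec2 b; };
 *     S s;        ->  "s.a", "s.b"
 *     S s[2];     ->  "s[0].a", "s[0].b", "s[1].a", "s[1].b"
 *
 * An array of a basic type is left as a single unsubscripted name, and for a
 * named interface block the member name is appended to the block name first.
 */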
108
109 static bool
110 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
111 struct gl_shader_program *prog,
112 unsigned *num_tfeedback_decls,
113 char ***varying_names)
114 {
115 bool has_xfb_qualifiers = false;
116
117 /* We still need to enable transform feedback mode even if xfb_stride is
118 * only applied to a global out. Also we don't bother to propagate
119 * xfb_stride to interface block members, so this catches that case as well.
120 */
121 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
122 if (prog->TransformFeedback.BufferStride[j]) {
123 has_xfb_qualifiers = true;
124 break;
125 }
126 }
127
128 foreach_in_list(ir_instruction, node, sh->ir) {
129 ir_variable *var = node->as_variable();
130 if (!var || var->data.mode != ir_var_shader_out)
131 continue;
132
133 /* From the ARB_enhanced_layouts spec:
134 *
135 * "Any shader making any static use (after preprocessing) of any of
136 * these *xfb_* qualifiers will cause the shader to be in a
137 * transform feedback capturing mode and hence responsible for
138 * describing the transform feedback setup. This mode will capture
139 * any output selected by *xfb_offset*, directly or indirectly, to
140 * a transform feedback buffer."
141 */
142 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
143 has_xfb_qualifiers = true;
144 }
145
146 if (var->data.explicit_xfb_offset) {
147 *num_tfeedback_decls += var->type->varying_count();
148 has_xfb_qualifiers = true;
149 }
150 }
151
152 if (*num_tfeedback_decls == 0)
153 return has_xfb_qualifiers;
154
155 unsigned i = 0;
156 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
157 foreach_in_list(ir_instruction, node, sh->ir) {
158 ir_variable *var = node->as_variable();
159 if (!var || var->data.mode != ir_var_shader_out)
160 continue;
161
162 if (var->data.explicit_xfb_offset) {
163 char *name;
164 const glsl_type *type, *member_type;
165
166 if (var->data.from_named_ifc_block) {
167 type = var->get_interface_type();
168 /* Find the member type before it was altered by lowering */
169 member_type =
170 type->fields.structure[type->field_index(var->name)].type;
171 name = ralloc_strdup(NULL, type->without_array()->name);
172 } else {
173 type = var->type;
174 member_type = NULL;
175 name = ralloc_strdup(NULL, var->name);
176 }
177 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
178 var->name, member_type, varying_names);
179 ralloc_free(name);
180 }
181 }
182
183 assert(i == *num_tfeedback_decls);
184 return has_xfb_qualifiers;
185 }
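/*
 * A rough GLSL sketch of the qualifiers detected above (names are made up):
 *
 *     layout(xfb_buffer = 0, xfb_stride = 32) out;    // global stride alone
 *                                                     // enables xfb mode
 *     layout(xfb_offset = 0)  out vec4 pos;           // counted as a capture
 *     layout(xfb_offset = 16) out vec4 color;         // counted as a capture
 *
 * Any xfb_buffer, xfb_stride or xfb_offset use sets has_xfb_qualifiers, but
 * only xfb_offset declarations contribute to num_tfeedback_decls.
 */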
186
187 /**
188 * Validate the types and qualifiers of an output from one stage against the
189 * matching input to another stage.
190 */
191 static void
192 cross_validate_types_and_qualifiers(struct gl_shader_program *prog,
193 const ir_variable *input,
194 const ir_variable *output,
195 gl_shader_stage consumer_stage,
196 gl_shader_stage producer_stage)
197 {
198 /* Check that the types match between stages.
199 */
200 const glsl_type *type_to_match = input->type;
201
202 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
203 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
204 consumer_stage != MESA_SHADER_FRAGMENT) ||
205 consumer_stage == MESA_SHADER_GEOMETRY;
206 if (extra_array_level) {
207 assert(type_to_match->is_array());
208 type_to_match = type_to_match->fields.array;
209 }
210
211 if (type_to_match != output->type) {
212 /* There is a bit of a special case for gl_TexCoord. This
213 * built-in is unsized by default. Applications that variably
214 * access it must redeclare it with a size. There is some
215 * language in the GLSL spec that implies the fragment shader
216 * and vertex shader do not have to agree on this size. Other
217 * drivers behave this way, and one or two applications seem to
218 * rely on it.
219 *
220 * Neither declaration needs to be modified here because the array
221 * sizes are fixed later when update_array_sizes is called.
222 *
223 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
224 *
225 * "Unlike user-defined varying variables, the built-in
226 * varying variables don't have a strict one-to-one
227 * correspondence between the vertex language and the
228 * fragment language."
229 */
230 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
231 linker_error(prog,
232 "%s shader output `%s' declared as type `%s', "
233 "but %s shader input declared as type `%s'\n",
234 _mesa_shader_stage_to_string(producer_stage),
235 output->name,
236 output->type->name,
237 _mesa_shader_stage_to_string(consumer_stage),
238 input->type->name);
239 return;
240 }
241 }
242
243 /* Check that all of the qualifiers match between stages.
244 */
245
246 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
247 * is required to match prior to OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
248 * conformance test suite does not verify that the qualifiers must match.
249 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
250 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
251 */
252 if (false /* always skip the centroid check */ &&
253 prog->data->Version < (prog->IsES ? 310 : 430) &&
254 input->data.centroid != output->data.centroid) {
255 linker_error(prog,
256 "%s shader output `%s' %s centroid qualifier, "
257 "but %s shader input %s centroid qualifier\n",
258 _mesa_shader_stage_to_string(producer_stage),
259 output->name,
260 (output->data.centroid) ? "has" : "lacks",
261 _mesa_shader_stage_to_string(consumer_stage),
262 (input->data.centroid) ? "has" : "lacks");
263 return;
264 }
265
266 if (input->data.sample != output->data.sample) {
267 linker_error(prog,
268 "%s shader output `%s' %s sample qualifier, "
269 "but %s shader input %s sample qualifier\n",
270 _mesa_shader_stage_to_string(producer_stage),
271 output->name,
272 (output->data.sample) ? "has" : "lacks",
273 _mesa_shader_stage_to_string(consumer_stage),
274 (input->data.sample) ? "has" : "lacks");
275 return;
276 }
277
278 if (input->data.patch != output->data.patch) {
279 linker_error(prog,
280 "%s shader output `%s' %s patch qualifier, "
281 "but %s shader input %s patch qualifier\n",
282 _mesa_shader_stage_to_string(producer_stage),
283 output->name,
284 (output->data.patch) ? "has" : "lacks",
285 _mesa_shader_stage_to_string(consumer_stage),
286 (input->data.patch) ? "has" : "lacks");
287 return;
288 }
289
290 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
291 *
292 * "As only outputs need be declared with invariant, an output from
293 * one shader stage will still match an input of a subsequent stage
294 * without the input being declared as invariant."
295 *
296 * while GLSL 4.20 says:
297 *
298 * "For variables leaving one shader and coming into another shader,
299 * the invariant keyword has to be used in both shaders, or a link
300 * error will result."
301 *
302 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
303 *
304 * "The invariance of varyings that are declared in both the vertex
305 * and fragment shaders must match."
306 */
307 if (input->data.invariant != output->data.invariant &&
308 prog->data->Version < (prog->IsES ? 300 : 430)) {
309 linker_error(prog,
310 "%s shader output `%s' %s invariant qualifier, "
311 "but %s shader input %s invariant qualifier\n",
312 _mesa_shader_stage_to_string(producer_stage),
313 output->name,
314 (output->data.invariant) ? "has" : "lacks",
315 _mesa_shader_stage_to_string(consumer_stage),
316 (input->data.invariant) ? "has" : "lacks");
317 return;
318 }
319
320 /* GLSL >= 4.40 removes text requiring interpolation qualifiers
321 * to match across stages; they need only match within the same stage.
322 *
323 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
324 *
325 * "It is a link-time error if, within the same stage, the interpolation
326 * qualifiers of variables of the same name do not match."
327 *
328 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
329 *
330 * "When no interpolation qualifier is present, smooth interpolation
331 * is used."
332 *
333 * So we match variables where one is smooth and the other has no explicit
334 * qualifier.
335 */
336 unsigned input_interpolation = input->data.interpolation;
337 unsigned output_interpolation = output->data.interpolation;
338 if (prog->IsES) {
339 if (input_interpolation == INTERP_MODE_NONE)
340 input_interpolation = INTERP_MODE_SMOOTH;
341 if (output_interpolation == INTERP_MODE_NONE)
342 output_interpolation = INTERP_MODE_SMOOTH;
343 }
344 if (input_interpolation != output_interpolation &&
345 prog->data->Version < 440) {
346 linker_error(prog,
347 "%s shader output `%s' specifies %s "
348 "interpolation qualifier, "
349 "but %s shader input specifies %s "
350 "interpolation qualifier\n",
351 _mesa_shader_stage_to_string(producer_stage),
352 output->name,
353 interpolation_string(output->data.interpolation),
354 _mesa_shader_stage_to_string(consumer_stage),
355 interpolation_string(input->data.interpolation));
356 return;
357 }
358 }
359
360 /**
361 * Validate front and back color outputs against single color input
362 */
363 static void
364 cross_validate_front_and_back_color(struct gl_shader_program *prog,
365 const ir_variable *input,
366 const ir_variable *front_color,
367 const ir_variable *back_color,
368 gl_shader_stage consumer_stage,
369 gl_shader_stage producer_stage)
370 {
371 if (front_color != NULL && front_color->data.assigned)
372 cross_validate_types_and_qualifiers(prog, input, front_color,
373 consumer_stage, producer_stage);
374
375 if (back_color != NULL && back_color->data.assigned)
376 cross_validate_types_and_qualifiers(prog, input, back_color,
377 consumer_stage, producer_stage);
378 }
379
380 static unsigned
381 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
382 {
383 unsigned location_start = VARYING_SLOT_VAR0;
384
385 switch (stage) {
386 case MESA_SHADER_VERTEX:
387 if (var->data.mode == ir_var_shader_in)
388 location_start = VERT_ATTRIB_GENERIC0;
389 break;
390 case MESA_SHADER_TESS_CTRL:
391 case MESA_SHADER_TESS_EVAL:
392 if (var->data.patch)
393 location_start = VARYING_SLOT_PATCH0;
394 break;
395 case MESA_SHADER_FRAGMENT:
396 if (var->data.mode == ir_var_shader_out)
397 location_start = FRAG_RESULT_DATA0;
398 break;
399 default:
400 break;
401 }
402
403 return var->data.location - location_start;
404 }
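/*
 * Example of the mapping (hypothetical declaration): a fragment shader output
 * declared "layout(location = 1) out vec4 color;" carries
 * data.location == FRAG_RESULT_DATA0 + 1, so this helper returns 1, i.e. the
 * slot relative to the first generic location of the stage in question.
 */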
405
406 struct explicit_location_info {
407 ir_variable *var;
408 unsigned base_type;
409 };
410
411 static bool
412 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
413 ir_variable *var,
414 unsigned location,
415 unsigned component,
416 unsigned location_limit,
417 const glsl_type *type,
418 gl_shader_program *prog,
419 gl_shader_stage stage)
420 {
421 unsigned last_comp;
422 if (type->without_array()->is_record()) {
423 /* The component qualifier can't be used on structs so just treat
424 * all component slots as used.
425 */
426 last_comp = 4;
427 } else {
428 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
429 last_comp = component + type->without_array()->vector_elements * dmul;
430 }
431
432 while (location < location_limit) {
433 unsigned i = component;
434 while (i < last_comp) {
435 if (explicit_locations[location][i].var != NULL) {
436 linker_error(prog,
437 "%s shader has multiple outputs explicitly "
438 "assigned to location %d and component %d\n",
439 _mesa_shader_stage_to_string(stage),
440 location, component);
441 return false;
442 }
443
444 /* Make sure all components at this location have the same type.
445 */
446 for (unsigned j = 0; j < 4; j++) {
447 if (explicit_locations[location][j].var &&
448 explicit_locations[location][j].base_type !=
449 type->without_array()->base_type) {
450 linker_error(prog,
451 "Varyings sharing the same location must "
452 "have the same underlying numerical type. "
453 "Location %u component %u\n", location, component);
454 return false;
455 }
456 }
457
458 explicit_locations[location][i].var = var;
459 explicit_locations[location][i].base_type =
460 type->without_array()->base_type;
461 i++;
462
463 /* We need to do some special handling for doubles as dvec3 and
464 * dvec4 consume two consecutive locations. We don't need to
465 * worry about components beginning at anything other than 0 as
466 * the spec does not allow this for dvec3 and dvec4.
467 */
468 if (i == 4 && last_comp > 4) {
469 last_comp = last_comp - 4;
470 /* Bump location index and reset the component index */
471 location++;
472 i = 0;
473 }
474 }
475
476 location++;
477 }
478
479 return true;
480 }
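/*
 * A hypothetical GLSL sketch of what this check accepts and rejects:
 *
 *     layout(location = 0)                out vec2 a;   // components 0-1
 *     layout(location = 0, component = 2) out vec2 b;   // components 2-3: OK
 *
 * is accepted, whereas reusing an already occupied component, or mixing base
 * types within one location (e.g. replacing "b" above with an ivec2 at
 * component 2 while a float output stays at component 0), is a link error.
 * dvec3/dvec4 are tracked across both of the locations they occupy.
 */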
481
482 /**
483 * Validate that outputs from one stage match inputs of another
484 */
485 void
486 cross_validate_outputs_to_inputs(struct gl_context *ctx,
487 struct gl_shader_program *prog,
488 gl_linked_shader *producer,
489 gl_linked_shader *consumer)
490 {
491 glsl_symbol_table parameters;
492 struct explicit_location_info explicit_locations[MAX_VARYING][4] = { 0 };
493
494 /* Find all shader outputs in the "producer" stage.
495 */
496 foreach_in_list(ir_instruction, node, producer->ir) {
497 ir_variable *const var = node->as_variable();
498
499 if (var == NULL || var->data.mode != ir_var_shader_out)
500 continue;
501
502 if (!var->data.explicit_location
503 || var->data.location < VARYING_SLOT_VAR0)
504 parameters.add_variable(var);
505 else {
506 /* User-defined varyings with explicit locations are handled
507 * differently because they do not need to have matching names.
508 */
509 const glsl_type *type = get_varying_type(var, producer->Stage);
510 unsigned num_elements = type->count_attribute_slots(false);
511 unsigned idx = compute_variable_location_slot(var, producer->Stage);
512 unsigned slot_limit = idx + num_elements;
513
514 unsigned slot_max =
515 ctx->Const.Program[producer->Stage].MaxOutputComponents / 4;
516 if (slot_limit > slot_max) {
517 linker_error(prog,
518 "Invalid location %u in %s shader\n",
519 idx, _mesa_shader_stage_to_string(producer->Stage));
520 return;
521 }
522
523 if (type->without_array()->is_interface()) {
524 for (unsigned i = 0; i < type->without_array()->length; i++) {
525 const glsl_type *field_type = type->fields.structure[i].type;
526 unsigned field_location = type->fields.structure[i].location -
527 (type->fields.structure[i].patch ? VARYING_SLOT_PATCH0 :
528 VARYING_SLOT_VAR0);
529 if (!check_location_aliasing(explicit_locations, var,
530 field_location,
531 0, field_location + 1,
532 field_type, prog,
533 producer->Stage)) {
534 return;
535 }
536 }
537 } else if (!check_location_aliasing(explicit_locations, var,
538 idx, var->data.location_frac,
539 slot_limit, type, prog,
540 producer->Stage)) {
541 return;
542 }
543 }
544 }
545
546
547 /* Find all shader inputs in the "consumer" stage. Any variables that have
548 * matching outputs already in the symbol table must have the same type and
549 * qualifiers.
550 *
551 * Exception: if the consumer is the geometry shader, then the inputs
552 * should be arrays and the type of the array element should match the type
553 * of the corresponding producer output.
554 */
555 foreach_in_list(ir_instruction, node, consumer->ir) {
556 ir_variable *const input = node->as_variable();
557
558 if (input == NULL || input->data.mode != ir_var_shader_in)
559 continue;
560
561 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
562 const ir_variable *const front_color =
563 parameters.get_variable("gl_FrontColor");
564
565 const ir_variable *const back_color =
566 parameters.get_variable("gl_BackColor");
567
568 cross_validate_front_and_back_color(prog, input,
569 front_color, back_color,
570 consumer->Stage, producer->Stage);
571 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
572 const ir_variable *const front_color =
573 parameters.get_variable("gl_FrontSecondaryColor");
574
575 const ir_variable *const back_color =
576 parameters.get_variable("gl_BackSecondaryColor");
577
578 cross_validate_front_and_back_color(prog, input,
579 front_color, back_color,
580 consumer->Stage, producer->Stage);
581 } else {
582 /* The rules for connecting inputs and outputs change in the presence
583 * of explicit locations. In this case, we no longer care about the
584 * names of the variables. Instead, we care only about the
585 * explicitly assigned location.
586 */
587 ir_variable *output = NULL;
588 if (input->data.explicit_location
589 && input->data.location >= VARYING_SLOT_VAR0) {
590
591 const glsl_type *type = get_varying_type(input, consumer->Stage);
592 unsigned num_elements = type->count_attribute_slots(false);
593 unsigned idx =
594 compute_variable_location_slot(input, consumer->Stage);
595 unsigned slot_limit = idx + num_elements;
596
597 while (idx < slot_limit) {
598 if (idx >= MAX_VARYING) {
599 linker_error(prog,
600 "Invalid location %u in %s shader\n", idx,
601 _mesa_shader_stage_to_string(consumer->Stage));
602 return;
603 }
604
605 output = explicit_locations[idx][input->data.location_frac].var;
606
607 if (output == NULL ||
608 input->data.location != output->data.location) {
609 linker_error(prog,
610 "%s shader input `%s' with explicit location "
611 "has no matching output\n",
612 _mesa_shader_stage_to_string(consumer->Stage),
613 input->name);
614 break;
615 }
616 idx++;
617 }
618 } else {
619 output = parameters.get_variable(input->name);
620 }
621
622 if (output != NULL) {
623 /* Interface blocks have their own validation elsewhere so don't
624 * try validating them here.
625 */
626 if (!(input->get_interface_type() &&
627 output->get_interface_type()))
628 cross_validate_types_and_qualifiers(prog, input, output,
629 consumer->Stage,
630 producer->Stage);
631 } else {
632 /* Check for input vars with unmatched output vars in the previous stage,
633 * taking into account that interface blocks could have a matching
634 * output but with a different name, so we ignore them.
635 */
636 assert(!input->data.assigned);
637 if (input->data.used && !input->get_interface_type() &&
638 !input->data.explicit_location && !prog->SeparateShader)
639 linker_error(prog,
640 "%s shader input `%s' "
641 "has no matching output in the previous stage\n",
642 _mesa_shader_stage_to_string(consumer->Stage),
643 input->name);
644 }
645 }
646 }
647 }
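/*
 * With explicit locations, names no longer have to match across stages; a
 * sketch (hypothetical names) of a pair this function links up:
 *
 *     // vertex shader:    layout(location = 3) out vec4 v_data;
 *     // fragment shader:  layout(location = 3) in  vec4 f_in;
 *
 * Without an explicit location an input is matched purely by name against
 * the producer's outputs, and a statically used input with no match is a
 * link error (except for separate shader objects).
 */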
648
649 /**
650 * Demote shader inputs and outputs that are not used in other stages, and
651 * remove them via dead code elimination.
652 */
653 static void
654 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
655 gl_linked_shader *sh,
656 enum ir_variable_mode mode)
657 {
658 if (is_separate_shader_object)
659 return;
660
661 foreach_in_list(ir_instruction, node, sh->ir) {
662 ir_variable *const var = node->as_variable();
663
664 if (var == NULL || var->data.mode != int(mode))
665 continue;
666
667 /* A shader 'in' or 'out' variable is only really an input or output if
668 * its value is used by other shader stages. This will cause the
669 * variable to have a location assigned.
670 */
671 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
672 assert(var->data.mode != ir_var_temporary);
673
674 /* Assign zeros to demoted inputs to allow more optimizations. */
675 if (var->data.mode == ir_var_shader_in && !var->constant_value)
676 var->constant_value = ir_constant::zero(var, var->type);
677
678 var->data.mode = ir_var_auto;
679 }
680 }
681
682 /* Eliminate code that is now dead due to unused inputs/outputs being
683 * demoted.
684 */
685 while (do_dead_code(sh->ir, false))
686 ;
687
688 }
689
690 /**
691 * Initialize this object based on a string that was passed to
692 * glTransformFeedbackVaryings.
693 *
694 * If the input is mal-formed, this call still succeeds, but it sets
695 * this->var_name to a mal-formed input, so tfeedback_decl::find_output_var()
696 * will fail to find any matching variable.
697 */
698 void
699 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
700 const char *input)
701 {
702 /* We don't have to be pedantic about what is a valid GLSL variable name,
703 * because any variable with an invalid name can't exist in the IR anyway.
704 */
705
706 this->location = -1;
707 this->orig_name = input;
708 this->lowered_builtin_array_variable = none;
709 this->skip_components = 0;
710 this->next_buffer_separator = false;
711 this->matched_candidate = NULL;
712 this->stream_id = 0;
713 this->buffer = 0;
714 this->offset = 0;
715
716 if (ctx->Extensions.ARB_transform_feedback3) {
717 /* Parse gl_NextBuffer. */
718 if (strcmp(input, "gl_NextBuffer") == 0) {
719 this->next_buffer_separator = true;
720 return;
721 }
722
723 /* Parse gl_SkipComponents. */
724 if (strcmp(input, "gl_SkipComponents1") == 0)
725 this->skip_components = 1;
726 else if (strcmp(input, "gl_SkipComponents2") == 0)
727 this->skip_components = 2;
728 else if (strcmp(input, "gl_SkipComponents3") == 0)
729 this->skip_components = 3;
730 else if (strcmp(input, "gl_SkipComponents4") == 0)
731 this->skip_components = 4;
732
733 if (this->skip_components)
734 return;
735 }
736
737 /* Parse a declaration. */
738 const char *base_name_end;
739 long subscript = parse_program_resource_name(input, &base_name_end);
740 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
741 if (this->var_name == NULL) {
742 _mesa_error_no_memory(__func__);
743 return;
744 }
745
746 if (subscript >= 0) {
747 this->array_subscript = subscript;
748 this->is_subscripted = true;
749 } else {
750 this->is_subscripted = false;
751 }
752
753 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
754 * class must behave specially to account for the fact that gl_ClipDistance
755 * is converted from a float[8] to a vec4[2].
756 */
757 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
758 strcmp(this->var_name, "gl_ClipDistance") == 0) {
759 this->lowered_builtin_array_variable = clip_distance;
760 }
761 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
762 strcmp(this->var_name, "gl_CullDistance") == 0) {
763 this->lowered_builtin_array_variable = cull_distance;
764 }
765
766 if (ctx->Const.LowerTessLevel &&
767 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
768 this->lowered_builtin_array_variable = tess_level_outer;
769 if (ctx->Const.LowerTessLevel &&
770 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
771 this->lowered_builtin_array_variable = tess_level_inner;
772 }
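/*
 * Typical strings handed to init() (illustrative only):
 *
 *     static const char *varyings[] =
 *        { "foo", "bar[1]", "gl_NextBuffer", "gl_SkipComponents2" };
 *     glTransformFeedbackVaryings(prog, 4, varyings, GL_INTERLEAVED_ATTRIBS);
 *
 * "foo" becomes a plain varying and "bar[1]" a subscripted one, while the two
 * gl_* pseudo-varyings set next_buffer_separator / skip_components instead
 * (only when ARB_transform_feedback3 is available).
 */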
773
774
775 /**
776 * Determine whether two tfeedback_decl objects refer to the same variable and
777 * array index (if applicable).
778 */
779 bool
780 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
781 {
782 assert(x.is_varying() && y.is_varying());
783
784 if (strcmp(x.var_name, y.var_name) != 0)
785 return false;
786 if (x.is_subscripted != y.is_subscripted)
787 return false;
788 if (x.is_subscripted && x.array_subscript != y.array_subscript)
789 return false;
790 return true;
791 }
792
793
794 /**
795 * Assign a location and stream ID for this tfeedback_decl object based on the
796 * transform feedback candidate found by find_candidate.
797 *
798 * If an error occurs, the error is reported through linker_error() and false
799 * is returned.
800 */
801 bool
802 tfeedback_decl::assign_location(struct gl_context *ctx,
803 struct gl_shader_program *prog)
804 {
805 assert(this->is_varying());
806
807 unsigned fine_location
808 = this->matched_candidate->toplevel_var->data.location * 4
809 + this->matched_candidate->toplevel_var->data.location_frac
810 + this->matched_candidate->offset;
811 const unsigned dmul =
812 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
813
814 if (this->matched_candidate->type->is_array()) {
815 /* Array variable */
816 const unsigned matrix_cols =
817 this->matched_candidate->type->fields.array->matrix_columns;
818 const unsigned vector_elements =
819 this->matched_candidate->type->fields.array->vector_elements;
820 unsigned actual_array_size;
821 switch (this->lowered_builtin_array_variable) {
822 case clip_distance:
823 actual_array_size = prog->last_vert_prog ?
824 prog->last_vert_prog->info.clip_distance_array_size : 0;
825 break;
826 case cull_distance:
827 actual_array_size = prog->last_vert_prog ?
828 prog->last_vert_prog->info.cull_distance_array_size : 0;
829 break;
830 case tess_level_outer:
831 actual_array_size = 4;
832 break;
833 case tess_level_inner:
834 actual_array_size = 2;
835 break;
836 case none:
837 default:
838 actual_array_size = this->matched_candidate->type->array_size();
839 break;
840 }
841
842 if (this->is_subscripted) {
843 /* Check array bounds. */
844 if (this->array_subscript >= actual_array_size) {
845 linker_error(prog, "Transform feedback varying %s has index "
846 "%i, but the array size is %u.",
847 this->orig_name, this->array_subscript,
848 actual_array_size);
849 return false;
850 }
851 unsigned array_elem_size = this->lowered_builtin_array_variable ?
852 1 : vector_elements * matrix_cols * dmul;
853 fine_location += array_elem_size * this->array_subscript;
854 this->size = 1;
855 } else {
856 this->size = actual_array_size;
857 }
858 this->vector_elements = vector_elements;
859 this->matrix_columns = matrix_cols;
860 if (this->lowered_builtin_array_variable)
861 this->type = GL_FLOAT;
862 else
863 this->type = this->matched_candidate->type->fields.array->gl_type;
864 } else {
865 /* Regular variable (scalar, vector, or matrix) */
866 if (this->is_subscripted) {
867 linker_error(prog, "Transform feedback varying %s requested, "
868 "but %s is not an array.",
869 this->orig_name, this->var_name);
870 return false;
871 }
872 this->size = 1;
873 this->vector_elements = this->matched_candidate->type->vector_elements;
874 this->matrix_columns = this->matched_candidate->type->matrix_columns;
875 this->type = this->matched_candidate->type->gl_type;
876 }
877 this->location = fine_location / 4;
878 this->location_frac = fine_location % 4;
879
880 /* From GL_EXT_transform_feedback:
881 * A program will fail to link if:
882 *
883 * * the total number of components to capture in any varying
884 * variable in <varyings> is greater than the constant
885 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
886 * buffer mode is SEPARATE_ATTRIBS_EXT;
887 */
888 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
889 this->num_components() >
890 ctx->Const.MaxTransformFeedbackSeparateComponents) {
891 linker_error(prog, "Transform feedback varying %s exceeds "
892 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
893 this->orig_name);
894 return false;
895 }
896
897 /* Only transform feedback varyings can be assigned to non-zero streams,
898 * so assign the stream id here.
899 */
900 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
901
902 unsigned array_offset = this->array_subscript * 4 * dmul;
903 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
904 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
905 this->offset = this->matched_candidate->toplevel_var->data.offset +
906 array_offset + struct_offset;
907
908 return true;
909 }
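/*
 * "Fine" locations are counted in single float components:
 * fine_location = location * 4 + location_frac (+ any struct member offset),
 * and the final location / location_frac pair is recovered at the end by
 * dividing by four and taking the remainder.
 */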
910
911
912 unsigned
913 tfeedback_decl::get_num_outputs() const
914 {
915 if (!this->is_varying()) {
916 return 0;
917 }
918 return (this->num_components() + this->location_frac + 3)/4;
919 }
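/*
 * Worked example: a vec3 varying starting at component 2 of its slot has
 * num_components() == 3 and location_frac == 2, so it needs
 * (3 + 2 + 3) / 4 == 2 hardware output registers.
 */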
920
921
922 /**
923 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
924 *
925 * If an error occurs, the error is reported through linker_error() and false
926 * is returned.
927 */
928 bool
929 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
930 struct gl_transform_feedback_info *info,
931 unsigned buffer, unsigned buffer_index,
932 const unsigned max_outputs, bool *explicit_stride,
933 bool has_xfb_qualifiers) const
934 {
935 unsigned xfb_offset = 0;
936 unsigned size = this->size;
937 /* Handle gl_SkipComponents. */
938 if (this->skip_components) {
939 info->Buffers[buffer].Stride += this->skip_components;
940 size = this->skip_components;
941 goto store_varying;
942 }
943
944 if (this->next_buffer_separator) {
945 size = 0;
946 goto store_varying;
947 }
948
949 if (has_xfb_qualifiers) {
950 xfb_offset = this->offset / 4;
951 } else {
952 xfb_offset = info->Buffers[buffer].Stride;
953 }
954 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
955
956 {
957 unsigned location = this->location;
958 unsigned location_frac = this->location_frac;
959 unsigned num_components = this->num_components();
960 while (num_components > 0) {
961 unsigned output_size = MIN2(num_components, 4 - location_frac);
962 assert((info->NumOutputs == 0 && max_outputs == 0) ||
963 info->NumOutputs < max_outputs);
964
965 /* From the ARB_enhanced_layouts spec:
966 *
967 * "If such a block member or variable is not written during a shader
968 * invocation, the buffer contents at the assigned offset will be
969 * undefined. Even if there are no static writes to a variable or
970 * member that is assigned a transform feedback offset, the space is
971 * still allocated in the buffer and still affects the stride."
972 */
973 if (this->is_varying_written()) {
974 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
975 info->Outputs[info->NumOutputs].OutputRegister = location;
976 info->Outputs[info->NumOutputs].NumComponents = output_size;
977 info->Outputs[info->NumOutputs].StreamId = stream_id;
978 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
979 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
980 ++info->NumOutputs;
981 }
982 info->Buffers[buffer].Stream = this->stream_id;
983 xfb_offset += output_size;
984
985 num_components -= output_size;
986 location++;
987 location_frac = 0;
988 }
989 }
990
991 if (explicit_stride && explicit_stride[buffer]) {
992 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
993 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
994 "multiple of 8 as its applied to a type that is or "
995 "contains a double.",
996 info->Buffers[buffer].Stride * 4);
997 return false;
998 }
999
1000 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
1001 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
1002 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1003 "buffer (%d)", xfb_offset * 4,
1004 info->Buffers[buffer].Stride * 4, buffer);
1005 return false;
1006 }
1007 } else {
1008 info->Buffers[buffer].Stride = xfb_offset;
1009 }
1010
1011 /* From GL_EXT_transform_feedback:
1012 * A program will fail to link if:
1013 *
1014 * * the total number of components to capture is greater than
1015 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1016 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
1017 *
1018 * From GL_ARB_enhanced_layouts:
1019 *
1020 * "The resulting stride (implicit or explicit) must be less than or
1021 * equal to the implementation-dependent constant
1022 * gl_MaxTransformFeedbackInterleavedComponents."
1023 */
1024 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1025 has_xfb_qualifiers) &&
1026 info->Buffers[buffer].Stride >
1027 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1028 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1029 "limit has been exceeded.");
1030 return false;
1031 }
1032
1033 store_varying:
1034 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1035 this->orig_name);
1036 info->Varyings[info->NumVarying].Type = this->type;
1037 info->Varyings[info->NumVarying].Size = size;
1038 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1039 info->NumVarying++;
1040 info->Buffers[buffer].NumVaryings++;
1041
1042 return true;
1043 }
1044
1045
1046 const tfeedback_candidate *
1047 tfeedback_decl::find_candidate(gl_shader_program *prog,
1048 hash_table *tfeedback_candidates)
1049 {
1050 const char *name = this->var_name;
1051 switch (this->lowered_builtin_array_variable) {
1052 case none:
1053 name = this->var_name;
1054 break;
1055 case clip_distance:
1056 name = "gl_ClipDistanceMESA";
1057 break;
1058 case cull_distance:
1059 name = "gl_CullDistanceMESA";
1060 break;
1061 case tess_level_outer:
1062 name = "gl_TessLevelOuterMESA";
1063 break;
1064 case tess_level_inner:
1065 name = "gl_TessLevelInnerMESA";
1066 break;
1067 }
1068 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1069
1070 this->matched_candidate = entry ?
1071 (const tfeedback_candidate *) entry->data : NULL;
1072
1073 if (!this->matched_candidate) {
1074 /* From GL_EXT_transform_feedback:
1075 * A program will fail to link if:
1076 *
1077 * * any variable name specified in the <varyings> array is not
1078 * declared as an output in the geometry shader (if present) or
1079 * the vertex shader (if no geometry shader is present);
1080 */
1081 linker_error(prog, "Transform feedback varying %s undeclared.",
1082 this->orig_name);
1083 }
1084
1085 return this->matched_candidate;
1086 }
1087
1088
1089 /**
1090 * Parse all the transform feedback declarations that were passed to
1091 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1092 *
1093 * If an error occurs, the error is reported through linker_error() and false
1094 * is returned.
1095 */
1096 static bool
1097 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1098 const void *mem_ctx, unsigned num_names,
1099 char **varying_names, tfeedback_decl *decls)
1100 {
1101 for (unsigned i = 0; i < num_names; ++i) {
1102 decls[i].init(ctx, mem_ctx, varying_names[i]);
1103
1104 if (!decls[i].is_varying())
1105 continue;
1106
1107 /* From GL_EXT_transform_feedback:
1108 * A program will fail to link if:
1109 *
1110 * * any two entries in the <varyings> array specify the same varying
1111 * variable;
1112 *
1113 * We interpret this to mean "any two entries in the <varyings> array
1114 * specify the same varying variable and array index", since transform
1115 * feedback of arrays would be useless otherwise.
1116 */
1117 for (unsigned j = 0; j < i; ++j) {
1118 if (!decls[j].is_varying())
1119 continue;
1120
1121 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1122 linker_error(prog, "Transform feedback varying %s specified "
1123 "more than once.", varying_names[i]);
1124 return false;
1125 }
1126 }
1127 }
1128 return true;
1129 }
1130
1131
1132 static int
1133 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1134 {
1135 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1136 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1137
1138 if (x->get_buffer() != y->get_buffer())
1139 return x->get_buffer() - y->get_buffer();
1140 return x->get_offset() - y->get_offset();
1141 }
1142
1143 /**
1144 * Store transform feedback location assignments into
1145 * prog->sh.LinkedTransformFeedback based on the data stored in
1146 * tfeedback_decls.
1147 *
1148 * If an error occurs, the error is reported through linker_error() and false
1149 * is returned.
1150 */
1151 static bool
1152 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1153 unsigned num_tfeedback_decls,
1154 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1155 {
1156 if (!prog->last_vert_prog)
1157 return true;
1158
1159 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1160 * tracking the buffers in use doesn't overflow.
1161 */
1162 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1163
1164 bool separate_attribs_mode =
1165 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1166
1167 struct gl_program *xfb_prog = prog->last_vert_prog;
1168 xfb_prog->sh.LinkedTransformFeedback =
1169 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1170
1171 /* The xfb_offset qualifier does not have to be used in increasing order;
1172 * however, some drivers expect to receive the list of transform feedback
1173 * declarations in order, so sort it now for convenience.
1174 */
1175 if (has_xfb_qualifiers)
1176 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1177 cmp_xfb_offset);
1178
1179 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1180 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1181 num_tfeedback_decls);
1182
1183 unsigned num_outputs = 0;
1184 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1185 if (tfeedback_decls[i].is_varying_written())
1186 num_outputs += tfeedback_decls[i].get_num_outputs();
1187 }
1188
1189 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1190 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1191 num_outputs);
1192
1193 unsigned num_buffers = 0;
1194 unsigned buffers = 0;
1195
1196 if (!has_xfb_qualifiers && separate_attribs_mode) {
1197 /* GL_SEPARATE_ATTRIBS */
1198 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1199 if (!tfeedback_decls[i].store(ctx, prog,
1200 xfb_prog->sh.LinkedTransformFeedback,
1201 num_buffers, num_buffers, num_outputs,
1202 NULL, has_xfb_qualifiers))
1203 return false;
1204
1205 buffers |= 1 << num_buffers;
1206 num_buffers++;
1207 }
1208 }
1209 else {
1210 /* GL_INTERLEAVED_ATTRIBS */
1211 int buffer_stream_id = -1;
1212 unsigned buffer =
1213 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1214 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1215
1216 /* Apply any xfb_stride global qualifiers */
1217 if (has_xfb_qualifiers) {
1218 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1219 if (prog->TransformFeedback.BufferStride[j]) {
1220 buffers |= 1 << j;
1221 explicit_stride[j] = true;
1222 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1223 prog->TransformFeedback.BufferStride[j] / 4;
1224 }
1225 }
1226 }
1227
1228 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1229 if (has_xfb_qualifiers &&
1230 buffer != tfeedback_decls[i].get_buffer()) {
1231 /* we have moved to the next buffer so reset stream id */
1232 buffer_stream_id = -1;
1233 num_buffers++;
1234 }
1235
1236 if (tfeedback_decls[i].is_next_buffer_separator()) {
1237 if (!tfeedback_decls[i].store(ctx, prog,
1238 xfb_prog->sh.LinkedTransformFeedback,
1239 buffer, num_buffers, num_outputs,
1240 explicit_stride, has_xfb_qualifiers))
1241 return false;
1242 num_buffers++;
1243 buffer_stream_id = -1;
1244 continue;
1245 } else if (tfeedback_decls[i].is_varying()) {
1246 if (buffer_stream_id == -1) {
1247 /* First varying writing to this buffer: remember its stream */
1248 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1249 } else if (buffer_stream_id !=
1250 (int) tfeedback_decls[i].get_stream_id()) {
1251 /* Varying writes to the same buffer from a different stream */
1252 linker_error(prog,
1253 "Transform feedback can't capture varyings belonging "
1254 "to different vertex streams in a single buffer. "
1255 "Varying %s writes to buffer from stream %u, other "
1256 "varyings in the same buffer write from stream %u.",
1257 tfeedback_decls[i].name(),
1258 tfeedback_decls[i].get_stream_id(),
1259 buffer_stream_id);
1260 return false;
1261 }
1262 }
1263
1264 if (has_xfb_qualifiers) {
1265 buffer = tfeedback_decls[i].get_buffer();
1266 } else {
1267 buffer = num_buffers;
1268 }
1269 buffers |= 1 << buffer;
1270
1271 if (!tfeedback_decls[i].store(ctx, prog,
1272 xfb_prog->sh.LinkedTransformFeedback,
1273 buffer, num_buffers, num_outputs,
1274 explicit_stride, has_xfb_qualifiers))
1275 return false;
1276 }
1277 }
1278
1279 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1280
1281 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1282 return true;
1283 }
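/*
 * Rough sketch of buffer assignment in the two modes handled above, for a
 * hypothetical capture list:
 *
 *     SEPARATE_ATTRIBS:     "a", "b"                  -> buffers 0 and 1
 *     INTERLEAVED_ATTRIBS:  "a", "gl_NextBuffer", "b" -> buffers 0 and 1
 *
 * With ARB_enhanced_layouts qualifiers the buffer index instead comes from
 * xfb_buffer on the declaration itself, and all varyings captured to one
 * buffer must belong to the same vertex stream.
 */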
1284
1285 namespace {
1286
1287 /**
1288 * Data structure recording the relationship between outputs of one shader
1289 * stage (the "producer") and inputs of another (the "consumer").
1290 */
1291 class varying_matches
1292 {
1293 public:
1294 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1295 bool enhanced_layouts_enabled,
1296 gl_shader_stage producer_stage,
1297 gl_shader_stage consumer_stage);
1298 ~varying_matches();
1299 void record(ir_variable *producer_var, ir_variable *consumer_var);
1300 unsigned assign_locations(struct gl_shader_program *prog,
1301 uint8_t *components,
1302 uint64_t reserved_slots);
1303 void store_locations() const;
1304
1305 private:
1306 bool is_varying_packing_safe(const glsl_type *type,
1307 const ir_variable *var);
1308
1309 /**
1310 * If true, this driver disables varying packing, so all varyings need to
1311 * be aligned on slot boundaries, and take up a number of slots equal to
1312 * their number of matrix columns times their array size.
1313 *
1314 * Packing may also be disabled because our current packing method is not
1315 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1316 * guaranteed to match across stages.
1317 */
1318 const bool disable_varying_packing;
1319
1320 /**
1321 * If true, this driver has transform feedback enabled. The transform
1322 * feedback code requires at least some packing be done even when varying
1323 * packing is disabled; fortunately, where transform feedback requires
1324 * packing it's safe to override the disabled setting. See
1325 * is_varying_packing_safe().
1326 */
1327 const bool xfb_enabled;
1328
1329 const bool enhanced_layouts_enabled;
1330
1331 /**
1332 * Enum representing the order in which varyings are packed within a
1333 * packing class.
1334 *
1335 * Currently we pack vec4's first, then vec2's, then scalar values, then
1336 * vec3's. This order ensures that the only vectors that are at risk of
1337 * having to be "double parked" (split between two adjacent varying slots)
1338 * are the vec3's.
1339 */
1340 enum packing_order_enum {
1341 PACKING_ORDER_VEC4,
1342 PACKING_ORDER_VEC2,
1343 PACKING_ORDER_SCALAR,
1344 PACKING_ORDER_VEC3,
1345 };
1346
1347 static unsigned compute_packing_class(const ir_variable *var);
1348 static packing_order_enum compute_packing_order(const ir_variable *var);
1349 static int match_comparator(const void *x_generic, const void *y_generic);
1350 static int xfb_comparator(const void *x_generic, const void *y_generic);
1351
1352 /**
1353 * Structure recording the relationship between a single producer output
1354 * and a single consumer input.
1355 */
1356 struct match {
1357 /**
1358 * Packing class for this varying, computed by compute_packing_class().
1359 */
1360 unsigned packing_class;
1361
1362 /**
1363 * Packing order for this varying, computed by compute_packing_order().
1364 */
1365 packing_order_enum packing_order;
1366 unsigned num_components;
1367
1368 /**
1369 * The output variable in the producer stage.
1370 */
1371 ir_variable *producer_var;
1372
1373 /**
1374 * The input variable in the consumer stage.
1375 */
1376 ir_variable *consumer_var;
1377
1378 /**
1379 * The location which has been assigned for this varying. This is
1380 * expressed in multiples of a float, with the first generic varying
1381 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1382 * value 0.
1383 */
1384 unsigned generic_location;
1385 } *matches;
1386
1387 /**
1388 * The number of elements in the \c matches array that are currently in
1389 * use.
1390 */
1391 unsigned num_matches;
1392
1393 /**
1394 * The number of elements that were set aside for the \c matches array when
1395 * it was allocated.
1396 */
1397 unsigned matches_capacity;
1398
1399 gl_shader_stage producer_stage;
1400 gl_shader_stage consumer_stage;
1401 };
1402
1403 } /* anonymous namespace */
1404
1405 varying_matches::varying_matches(bool disable_varying_packing,
1406 bool xfb_enabled,
1407 bool enhanced_layouts_enabled,
1408 gl_shader_stage producer_stage,
1409 gl_shader_stage consumer_stage)
1410 : disable_varying_packing(disable_varying_packing),
1411 xfb_enabled(xfb_enabled),
1412 enhanced_layouts_enabled(enhanced_layouts_enabled),
1413 producer_stage(producer_stage),
1414 consumer_stage(consumer_stage)
1415 {
1416 /* Note: this initial capacity is rather arbitrarily chosen to be large
1417 * enough for many cases without wasting an unreasonable amount of space.
1418 * varying_matches::record() will resize the array if there are more than
1419 * this number of varyings.
1420 */
1421 this->matches_capacity = 8;
1422 this->matches = (match *)
1423 malloc(sizeof(*this->matches) * this->matches_capacity);
1424 this->num_matches = 0;
1425 }
1426
1427
1428 varying_matches::~varying_matches()
1429 {
1430 free(this->matches);
1431 }
1432
1433
1434 /**
1435 * Packing is always safe on individual arrays, structures, and matrices. It
1436 * is also safe if the varying is only used for transform feedback.
1437 */
1438 bool
1439 varying_matches::is_varying_packing_safe(const glsl_type *type,
1440 const ir_variable *var)
1441 {
1442 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1443 consumer_stage == MESA_SHADER_TESS_CTRL ||
1444 producer_stage == MESA_SHADER_TESS_CTRL)
1445 return false;
1446
1447 return xfb_enabled && (type->is_array() || type->is_record() ||
1448 type->is_matrix() || var->data.is_xfb_only);
1449 }
1450
1451
1452 /**
1453 * Record the given producer/consumer variable pair in the list of variables
1454 * that should later be assigned locations.
1455 *
1456 * It is permissible for \c consumer_var to be NULL (this happens if a
1457 * variable is output by the producer and consumed by transform feedback, but
1458 * not consumed by the consumer).
1459 *
1460 * If \c producer_var has already been paired up with a consumer_var, or
1461 * producer_var is part of fixed pipeline functionality (and hence already has
1462 * a location assigned), this function has no effect.
1463 *
1464 * Note: as a side effect this function may change the interpolation type of
1465 * \c producer_var, but only when the change couldn't possibly affect
1466 * rendering.
1467 */
1468 void
1469 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1470 {
1471 assert(producer_var != NULL || consumer_var != NULL);
1472
1473 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1474 producer_var->data.explicit_location)) ||
1475 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1476 consumer_var->data.explicit_location))) {
1477 /* Either a location already exists for this variable (since it is part
1478 * of fixed functionality), or it has already been recorded as part of a
1479 * previous match.
1480 */
1481 return;
1482 }
1483
1484 bool needs_flat_qualifier = consumer_var == NULL &&
1485 (producer_var->type->contains_integer() ||
1486 producer_var->type->contains_double());
1487
1488 if (!disable_varying_packing &&
1489 (needs_flat_qualifier ||
1490 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1491 /* Since this varying is not being consumed by the fragment shader, its
1492 * interpolation type cannot possibly affect rendering.
1493 * Also, this variable is non-flat and is (or contains) an integer
1494 * or a double.
1495 * If the consumer stage is unknown, don't modify the interpolation
1496 * type as it could affect rendering later with separate shaders.
1497 *
1498 * lower_packed_varyings requires all integer varyings to be flat,
1499 * regardless of where they appear. We can trivially satisfy that
1500 * requirement by changing the interpolation type to flat here.
1501 */
1502 if (producer_var) {
1503 producer_var->data.centroid = false;
1504 producer_var->data.sample = false;
1505 producer_var->data.interpolation = INTERP_MODE_FLAT;
1506 }
1507
1508 if (consumer_var) {
1509 consumer_var->data.centroid = false;
1510 consumer_var->data.sample = false;
1511 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1512 }
1513 }
1514
1515 if (this->num_matches == this->matches_capacity) {
1516 this->matches_capacity *= 2;
1517 this->matches = (match *)
1518 realloc(this->matches,
1519 sizeof(*this->matches) * this->matches_capacity);
1520 }
1521
1522 /* We must use the consumer to compute the packing class because in GL4.4+
1523 * there is no guarantee interpolation qualifiers will match across stages.
1524 *
1525 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1526 *
1527 * "The type and presence of interpolation qualifiers of variables with
1528 * the same name declared in all linked shaders for the same cross-stage
1529 * interface must match, otherwise the link command will fail.
1530 *
1531 * When comparing an output from one stage to an input of a subsequent
1532 * stage, the input and output don't match if their interpolation
1533 * qualifiers (or lack thereof) are not the same."
1534 *
1535 * This text was also in at least revision 7 of the 4.40 spec, but is no
1536 * longer in revision 9, nor in the 4.50 spec.
1537 */
1538 const ir_variable *const var = (consumer_var != NULL)
1539 ? consumer_var : producer_var;
1540 const gl_shader_stage stage = (consumer_var != NULL)
1541 ? consumer_stage : producer_stage;
1542 const glsl_type *type = get_varying_type(var, stage);
1543
1544 if (producer_var && consumer_var &&
1545 consumer_var->data.must_be_shader_input) {
1546 producer_var->data.must_be_shader_input = 1;
1547 }
1548
1549 this->matches[this->num_matches].packing_class
1550 = this->compute_packing_class(var);
1551 this->matches[this->num_matches].packing_order
1552 = this->compute_packing_order(var);
1553 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1554 var->data.must_be_shader_input) {
1555 unsigned slots = type->count_attribute_slots(false);
1556 this->matches[this->num_matches].num_components = slots * 4;
1557 } else {
1558 this->matches[this->num_matches].num_components
1559 = type->component_slots();
1560 }
1561
1562 this->matches[this->num_matches].producer_var = producer_var;
1563 this->matches[this->num_matches].consumer_var = consumer_var;
1564 this->num_matches++;
1565 if (producer_var)
1566 producer_var->data.is_unmatched_generic_inout = 0;
1567 if (consumer_var)
1568 consumer_var->data.is_unmatched_generic_inout = 0;
1569 }
1570
1571
1572 /**
1573 * Choose locations for all of the variable matches that were previously
1574 * passed to varying_matches::record().
1575 */
1576 unsigned
1577 varying_matches::assign_locations(struct gl_shader_program *prog,
1578 uint8_t *components,
1579 uint64_t reserved_slots)
1580 {
1581 /* If packing has been disabled then we cannot safely sort the varyings by
1582 * class as it may mean we are using a version of OpenGL where
1583 * interpolation qualifiers are not guaranteed to match across
1584 * shaders; sorting in this case could result in mismatched shader
1585 * interfaces.
1586 * When packing is disabled the sort orders varyings used by transform
1587 * feedback first, but also depends on *undefined behaviour* of qsort to
1588 * reverse the order of the varyings. See: xfb_comparator().
1589 */
1590 if (!this->disable_varying_packing) {
1591 /* Sort varying matches into an order that makes them easy to pack. */
1592 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1593 &varying_matches::match_comparator);
1594 } else {
1595 /* Only sort varyings that are only used by transform feedback. */
1596 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1597 &varying_matches::xfb_comparator);
1598 }
1599
1600 unsigned generic_location = 0;
1601 unsigned generic_patch_location = MAX_VARYING*4;
1602 bool previous_var_xfb_only = false;
1603
1604 for (unsigned i = 0; i < this->num_matches; i++) {
1605 unsigned *location = &generic_location;
1606
1607 const ir_variable *var;
1608 const glsl_type *type;
1609 bool is_vertex_input = false;
1610 if (matches[i].consumer_var) {
1611 var = matches[i].consumer_var;
1612 type = get_varying_type(var, consumer_stage);
1613 if (consumer_stage == MESA_SHADER_VERTEX)
1614 is_vertex_input = true;
1615 } else {
1616 var = matches[i].producer_var;
1617 type = get_varying_type(var, producer_stage);
1618 }
1619
1620 if (var->data.patch)
1621 location = &generic_patch_location;
1622
1623 /* Advance to the next slot if this varying has a different packing
1624 * class than the previous one, and we're not already on a slot
1625 * boundary.
1626 *
1627 * Also advance to the next slot if packing is disabled. This makes sure
1628 * we don't assign varyings the same locations which is possible
1629 * because we still pack individual arrays, records and matrices even
1630 * when packing is disabled. Note we don't advance to the next slot if
1631 * we can pack varyings together that are only used for transform
1632 * feedback.
1633 */
1634 if (var->data.must_be_shader_input ||
1635 (this->disable_varying_packing &&
1636 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1637 (i > 0 && this->matches[i - 1].packing_class
1638 != this->matches[i].packing_class )) {
1639 *location = ALIGN(*location, 4);
1640 }
1641
1642 previous_var_xfb_only = var->data.is_xfb_only;
1643
1644 /* The number of components taken up by this variable. For vertex shader
1645 * inputs, we use the number of slots * 4, as they have different
1646 * counting rules.
1647 */
1648 unsigned num_components = is_vertex_input ?
1649 type->count_attribute_slots(is_vertex_input) * 4 :
1650 this->matches[i].num_components;
1651
1652 /* The last slot for this variable, inclusive. */
1653 unsigned slot_end = *location + num_components - 1;
1654
1655       /* FIXME: We could be smarter in the code below and loop back over
1656        * locations that we skipped because we couldn't pack the varying
1657        * around a varying with an explicit location. For now, just let the
1658        * user hit the linking error if we run out of room and suggest they
1659        * use explicit locations.
1660        */
1661 while (slot_end < MAX_VARYING * 4u) {
1662 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1663 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1664
1665 assert(slots > 0);
1666 if (reserved_slots & slot_mask) {
1667 *location = ALIGN(*location + 1, 4);
1668 slot_end = *location + num_components - 1;
1669 continue;
1670 }
1671
1672 break;
1673 }
1674
1675 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1676 linker_error(prog, "insufficient contiguous locations available for "
1677                       "%s; it is possible an array or struct could not be "
1678 "packed between varyings with explicit locations. Try "
1679 "using an explicit location for arrays and structs.",
1680 var->name);
1681 }
1682
1683 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1684 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1685 components[j] = 4;
1686 components[slot_end / 4u] = (slot_end & 3) + 1;
1687 }
1688
1689 this->matches[i].generic_location = *location;
1690
1691 *location = slot_end + 1;
1692 }
1693
1694 return (generic_location + 3) / 4;
1695 }
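
/* A worked example of the arithmetic above (illustrative only; the shader
 * snippet is hypothetical and assumes packing is enabled with no reserved
 * slots):
 *
 *    out vec3 a;          // smooth
 *    flat out float b;
 *
 * Suppose "a" is processed first.  It is assigned generic_location 0 and
 * occupies components 0..2 of slot 0.  "b" is in a different packing class
 * (flat vs. smooth), so *location is ALIGN()ed from 3 up to 4 and "b" lands
 * at generic_location 4, i.e. slot 1, component 0.  generic_location then
 * advances to 5, and the function returns (5 + 3) / 4 = 2 slots used.
 */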
1696
1697
1698 /**
1699 * Update the producer and consumer shaders to reflect the locations
1700 * assignments that were made by varying_matches::assign_locations().
1701 */
1702 void
1703 varying_matches::store_locations() const
1704 {
1705    /* Check if the location needs to be packed with lower_packed_varyings()
1706     * or if we can just use ARB_enhanced_layouts packing.
1707     */
1708 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1709 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1710
1711 for (unsigned i = 0; i < this->num_matches; i++) {
1712 ir_variable *producer_var = this->matches[i].producer_var;
1713 ir_variable *consumer_var = this->matches[i].consumer_var;
1714 unsigned generic_location = this->matches[i].generic_location;
1715 unsigned slot = generic_location / 4;
1716 unsigned offset = generic_location % 4;
1717
1718 if (producer_var) {
1719 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1720 producer_var->data.location_frac = offset;
1721 }
1722
1723 if (consumer_var) {
1724 assert(consumer_var->data.location == -1);
1725 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1726 consumer_var->data.location_frac = offset;
1727 }
1728
1729 /* Find locations suitable for native packing via
1730 * ARB_enhanced_layouts.
1731 */
1732 if (producer_var && consumer_var) {
1733 if (enhanced_layouts_enabled) {
1734 const glsl_type *type =
1735 get_varying_type(producer_var, producer_stage);
1736 if (type->is_array() || type->is_matrix() || type->is_record() ||
1737 type->is_double()) {
1738 unsigned comp_slots = type->component_slots() + offset;
1739 unsigned slots = comp_slots / 4;
1740 if (comp_slots % 4)
1741 slots += 1;
1742
1743 for (unsigned j = 0; j < slots; j++) {
1744 pack_loc[slot + j] = true;
1745 }
1746 } else if (offset + type->vector_elements > 4) {
1747 pack_loc[slot] = true;
1748 pack_loc[slot + 1] = true;
1749 } else {
1750 loc_type[slot][offset] = type;
1751 }
1752 }
1753 }
1754 }
1755
1756 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1757 * suitable.
1758 */
1759 if (enhanced_layouts_enabled) {
1760 for (unsigned i = 0; i < this->num_matches; i++) {
1761 ir_variable *producer_var = this->matches[i].producer_var;
1762 ir_variable *consumer_var = this->matches[i].consumer_var;
1763 unsigned generic_location = this->matches[i].generic_location;
1764 unsigned slot = generic_location / 4;
1765
1766 if (pack_loc[slot] || !producer_var || !consumer_var)
1767 continue;
1768
1769 const glsl_type *type =
1770 get_varying_type(producer_var, producer_stage);
1771 bool type_match = true;
1772 for (unsigned j = 0; j < 4; j++) {
1773 if (loc_type[slot][j]) {
1774 if (type->base_type != loc_type[slot][j]->base_type)
1775 type_match = false;
1776 }
1777 }
1778
1779 if (type_match) {
1780 producer_var->data.explicit_location = 1;
1781 consumer_var->data.explicit_location = 1;
1782 producer_var->data.explicit_component = 1;
1783 consumer_var->data.explicit_component = 1;
1784 }
1785 }
1786 }
1787 }
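
/* Example of the slot/offset split above (illustrative values): a match with
 * generic_location 9 maps to slot 9 / 4 = 2 and offset 9 % 4 = 1, so both
 * variables get data.location = VARYING_SLOT_VAR0 + 2 and
 * data.location_frac = 1.  With ARB_enhanced_layouts enabled, and provided
 * every type sharing that slot has the same base type, the pair is flagged
 * explicit_location/explicit_component so the later packing pass can leave
 * it where it is.
 */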
1788
1789
1790 /**
1791 * Compute the "packing class" of the given varying. This is an unsigned
1792 * integer with the property that two variables in the same packing class can
1793  * be safely packed into the same vec4.
1794 */
1795 unsigned
1796 varying_matches::compute_packing_class(const ir_variable *var)
1797 {
1798 /* Without help from the back-end, there is no way to pack together
1799 * variables with different interpolation types, because
1800 * lower_packed_varyings must choose exactly one interpolation type for
1801 * each packed varying it creates.
1802 *
1803 * However, we can safely pack together floats, ints, and uints, because:
1804 *
1805 * - varyings of base type "int" and "uint" must use the "flat"
1806 * interpolation type, which can only occur in GLSL 1.30 and above.
1807 *
1808 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1809 * can store flat floats as ints without losing any information (using
1810 * the ir_unop_bitcast_* opcodes).
1811 *
1812 * Therefore, the packing class depends only on the interpolation type.
1813 */
1814 unsigned packing_class = var->data.centroid | (var->data.sample << 1) |
1815 (var->data.patch << 2) |
1816 (var->data.must_be_shader_input << 3);
1817 packing_class *= 8;
1818 packing_class += var->is_interpolation_flat()
1819 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
1820 return packing_class;
1821 }
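
/* For illustration (enum values shown symbolically, not numerically): a
 * varying declared "centroid smooth" gets
 *
 *    packing_class = (1 | 0 << 1 | 0 << 2 | 0 << 3) * 8 + INTERP_MODE_SMOOTH
 *
 * while a plain flat varying gets
 *
 *    packing_class = 0 * 8 + INTERP_MODE_FLAT
 *
 * so the two always end up in different classes and are never packed into
 * the same vec4.
 */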
1822
1823
1824 /**
1825 * Compute the "packing order" of the given varying. This is a sort key we
1826 * use to determine when to attempt to pack the given varying relative to
1827 * other varyings in the same packing class.
1828 */
1829 varying_matches::packing_order_enum
1830 varying_matches::compute_packing_order(const ir_variable *var)
1831 {
1832 const glsl_type *element_type = var->type;
1833
1834 while (element_type->is_array()) {
1835 element_type = element_type->fields.array;
1836 }
1837
1838 switch (element_type->component_slots() % 4) {
1839 case 1: return PACKING_ORDER_SCALAR;
1840 case 2: return PACKING_ORDER_VEC2;
1841 case 3: return PACKING_ORDER_VEC3;
1842 case 0: return PACKING_ORDER_VEC4;
1843 default:
1844       assert(!"Unexpected value of component_slots() % 4");
1845 return PACKING_ORDER_VEC4;
1846 }
1847 }
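
/* Examples (illustrative): for "out vec3 v" the element type is vec3, so
 * component_slots() % 4 == 3 and the order is PACKING_ORDER_VEC3.  For
 * "out float f[2]" the array is stripped down to float, giving
 * component_slots() % 4 == 1 and PACKING_ORDER_SCALAR.  A mat2 has four
 * component slots, so it sorts as PACKING_ORDER_VEC4.
 */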
1848
1849
1850 /**
1851 * Comparison function passed to qsort() to sort varyings by packing_class and
1852 * then by packing_order.
1853 */
1854 int
1855 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
1856 {
1857 const match *x = (const match *) x_generic;
1858 const match *y = (const match *) y_generic;
1859
1860 if (x->packing_class != y->packing_class)
1861 return x->packing_class - y->packing_class;
1862 return x->packing_order - y->packing_order;
1863 }
1864
1865
1866 /**
1867 * Comparison function passed to qsort() to sort varyings used only by
1868 * transform feedback when packing of other varyings is disabled.
1869 */
1870 int
1871 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
1872 {
1873 const match *x = (const match *) x_generic;
1874
1875 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
1876 return match_comparator(x_generic, y_generic);
1877
1878    /* FIXME: When the comparator returns 0 it means the elements being
1879     * compared are equivalent. However, the qsort documentation says:
1880     *
1881     *    "The order of equivalent elements is undefined."
1882     *
1883     * In practice the sort ends up reversing the order of the varyings, which
1884     * means locations are also assigned in this reversed order, and that
1885     * happens to be what we want. This is also what happens in
1886     * varying_matches::match_comparator().
1887     */
1888 return 0;
1889 }
1890
1891
1892 /**
1893 * Is the given variable a varying variable to be counted against the
1894 * limit in ctx->Const.MaxVarying?
1895 * This includes variables such as texcoords, colors and generic
1896 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
1897 */
1898 static bool
1899 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
1900 {
1901 /* Only fragment shaders will take a varying variable as an input */
1902 if (stage == MESA_SHADER_FRAGMENT &&
1903 var->data.mode == ir_var_shader_in) {
1904 switch (var->data.location) {
1905 case VARYING_SLOT_POS:
1906 case VARYING_SLOT_FACE:
1907 case VARYING_SLOT_PNTC:
1908 return false;
1909 default:
1910 return true;
1911 }
1912 }
1913 return false;
1914 }
1915
1916
1917 /**
1918 * Visitor class that generates tfeedback_candidate structs describing all
1919 * possible targets of transform feedback.
1920 *
1921 * tfeedback_candidate structs are stored in the hash table
1922 * tfeedback_candidates, which is passed to the constructor. This hash table
1923 * maps varying names to instances of the tfeedback_candidate struct.
1924 */
1925 class tfeedback_candidate_generator : public program_resource_visitor
1926 {
1927 public:
1928 tfeedback_candidate_generator(void *mem_ctx,
1929 hash_table *tfeedback_candidates)
1930 : mem_ctx(mem_ctx),
1931 tfeedback_candidates(tfeedback_candidates),
1932 toplevel_var(NULL),
1933 varying_floats(0)
1934 {
1935 }
1936
1937 void process(ir_variable *var)
1938 {
1939 /* All named varying interface blocks should be flattened by now */
1940 assert(!var->is_interface_instance());
1941
1942 this->toplevel_var = var;
1943 this->varying_floats = 0;
1944 program_resource_visitor::process(var, false);
1945 }
1946
1947 private:
1948 virtual void visit_field(const glsl_type *type, const char *name,
1949 bool /* row_major */,
1950 const glsl_type * /* record_type */,
1951 const enum glsl_interface_packing,
1952 bool /* last_field */)
1953 {
1954 assert(!type->without_array()->is_record());
1955 assert(!type->without_array()->is_interface());
1956
1957 tfeedback_candidate *candidate
1958 = rzalloc(this->mem_ctx, tfeedback_candidate);
1959 candidate->toplevel_var = this->toplevel_var;
1960 candidate->type = type;
1961 candidate->offset = this->varying_floats;
1962 _mesa_hash_table_insert(this->tfeedback_candidates,
1963 ralloc_strdup(this->mem_ctx, name),
1964 candidate);
1965 this->varying_floats += type->component_slots();
1966 }
1967
1968 /**
1969 * Memory context used to allocate hash table keys and values.
1970 */
1971 void * const mem_ctx;
1972
1973 /**
1974 * Hash table in which tfeedback_candidate objects should be stored.
1975 */
1976 hash_table * const tfeedback_candidates;
1977
1978 /**
1979 * Pointer to the toplevel variable that is being traversed.
1980 */
1981 ir_variable *toplevel_var;
1982
1983 /**
1984 * Total number of varying floats that have been visited so far. This is
1985 * used to determine the offset to each varying within the toplevel
1986 * variable.
1987 */
1988 unsigned varying_floats;
1989 };
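
/* A sketch of the candidates this visitor produces for a hypothetical
 * producer output:
 *
 *    struct S { vec3 a; float b; };
 *    out S s;
 *
 * visit_field() should be called once per leaf field, recording roughly
 * "s.a" (type vec3, offset 0) and "s.b" (type float, offset 3) in
 * tfeedback_candidates, with varying_floats ending up at 4.
 */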
1990
1991
1992 namespace linker {
1993
1994 void
1995 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
1996 hash_table *consumer_inputs,
1997 hash_table *consumer_interface_inputs,
1998 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1999 {
2000 memset(consumer_inputs_with_locations,
2001 0,
2002 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2003
2004 foreach_in_list(ir_instruction, node, ir) {
2005 ir_variable *const input_var = node->as_variable();
2006
2007 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2008 /* All interface blocks should have been lowered by this point */
2009 assert(!input_var->type->is_interface());
2010
2011 if (input_var->data.explicit_location) {
2012 /* assign_varying_locations only cares about finding the
2013 * ir_variable at the start of a contiguous location block.
2014 *
2015 * - For !producer, consumer_inputs_with_locations isn't used.
2016 *
2017 * - For !consumer, consumer_inputs_with_locations is empty.
2018 *
2019 * For consumer && producer, if you were trying to set some
2020 * ir_variable to the middle of a location block on the other side
2021 * of producer/consumer, cross_validate_outputs_to_inputs() should
2022 * be link-erroring due to either type mismatch or location
2023 * overlaps. If the variables do match up, then they've got a
2024 * matching data.location and you only looked at
2025 * consumer_inputs_with_locations[var->data.location], not any
2026 * following entries for the array/structure.
2027 */
2028 consumer_inputs_with_locations[input_var->data.location] =
2029 input_var;
2030 } else if (input_var->get_interface_type() != NULL) {
2031 char *const iface_field_name =
2032 ralloc_asprintf(mem_ctx, "%s.%s",
2033 input_var->get_interface_type()->without_array()->name,
2034 input_var->name);
2035 _mesa_hash_table_insert(consumer_interface_inputs,
2036 iface_field_name, input_var);
2037 } else {
2038 _mesa_hash_table_insert(consumer_inputs,
2039 ralloc_strdup(mem_ctx, input_var->name),
2040 input_var);
2041 }
2042 }
2043 }
2044 }
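
/* A sketch of the resulting lookup keys for a hypothetical consumer:
 *
 *    in vec4 color;                  // keyed as "color" in consumer_inputs
 *    in Blk { vec4 v; } b;           // flattened earlier; the member is
 *                                    // keyed as "Blk.v" in
 *                                    // consumer_interface_inputs
 *    layout(location = 2) in vec4 p; // stored in
 *                                    // consumer_inputs_with_locations[]
 *                                    // at the index of p's data.location
 */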
2045
2046 /**
2047 * Find a variable from the consumer that "matches" the specified variable
2048 *
2049 * This function only finds inputs with names that match. There is no
2050 * validation (here) that the types, etc. are compatible.
2051 */
2052 ir_variable *
2053 get_matching_input(void *mem_ctx,
2054 const ir_variable *output_var,
2055 hash_table *consumer_inputs,
2056 hash_table *consumer_interface_inputs,
2057 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2058 {
2059 ir_variable *input_var;
2060
2061 if (output_var->data.explicit_location) {
2062 input_var = consumer_inputs_with_locations[output_var->data.location];
2063 } else if (output_var->get_interface_type() != NULL) {
2064 char *const iface_field_name =
2065 ralloc_asprintf(mem_ctx, "%s.%s",
2066 output_var->get_interface_type()->without_array()->name,
2067 output_var->name);
2068 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2069 input_var = entry ? (ir_variable *) entry->data : NULL;
2070 } else {
2071 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2072 input_var = entry ? (ir_variable *) entry->data : NULL;
2073 }
2074
2075 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2076 ? NULL : input_var;
2077 }
2078
2079 }
2080
2081 static int
2082 io_variable_cmp(const void *_a, const void *_b)
2083 {
2084 const ir_variable *const a = *(const ir_variable **) _a;
2085 const ir_variable *const b = *(const ir_variable **) _b;
2086
2087 if (a->data.explicit_location && b->data.explicit_location)
2088 return b->data.location - a->data.location;
2089
2090 if (a->data.explicit_location && !b->data.explicit_location)
2091 return 1;
2092
2093 if (!a->data.explicit_location && b->data.explicit_location)
2094 return -1;
2095
2096 return -strcmp(a->name, b->name);
2097 }
2098
2099 /**
2100 * Sort the shader IO variables into canonical order
2101 */
2102 static void
2103 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2104 {
2105 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2106 unsigned num_variables = 0;
2107
2108 foreach_in_list(ir_instruction, node, ir) {
2109 ir_variable *const var = node->as_variable();
2110
2111 if (var == NULL || var->data.mode != io_mode)
2112 continue;
2113
2114          /* If we have already encountered more I/O variables than could
2115           * ever successfully link, bail.
2116           */
2117 if (num_variables == ARRAY_SIZE(var_table))
2118 return;
2119
2120 var_table[num_variables++] = var;
2121 }
2122
2123 if (num_variables == 0)
2124 return;
2125
2126 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2127 * we're going to push the variables on to the IR list as a stack, so we
2128 * want the last variable (in canonical order) to be first in the list.
2129 */
2130 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2131
2132    /* Remove the variable from its current location in the IR, and put it at
2133     * the front.
2134     */
2135 for (unsigned i = 0; i < num_variables; i++) {
2136 var_table[i]->remove();
2137 ir->push_head(var_table[i]);
2138 }
2139 }
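
/* Example of the canonical order (hypothetical declarations): given
 *
 *    in vec4 b;
 *    in vec4 a;
 *    layout(location = 1) in vec4 c;
 *
 * io_variable_cmp() sorts the table into { b, a, c } (non-explicit names in
 * reverse order, explicit locations last).  Pushing each entry onto the head
 * of the IR list then yields the canonical order { c, a, b }: variables with
 * explicit locations first, the rest in ascending name order.
 */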
2140
2141 /**
2142 * Generate a bitfield map of the explicit locations for shader varyings.
2143 *
2144  * Note: For tessellation shaders we are sitting right at the limit of the
2145  * 64-bit map. Per-vertex and per-patch locations have separate domains,
2146  * each with a max of MAX_VARYING.
2147 */
2148 static uint64_t
2149 reserved_varying_slot(struct gl_linked_shader *stage,
2150 ir_variable_mode io_mode)
2151 {
2152 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2153 /* Avoid an overflow of the returned value */
2154 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2155
2156 uint64_t slots = 0;
2157 int var_slot;
2158
2159 if (!stage)
2160 return slots;
2161
2162 foreach_in_list(ir_instruction, node, stage->ir) {
2163 ir_variable *const var = node->as_variable();
2164
2165 if (var == NULL || var->data.mode != io_mode ||
2166 !var->data.explicit_location ||
2167 var->data.location < VARYING_SLOT_VAR0)
2168 continue;
2169
2170 var_slot = var->data.location - VARYING_SLOT_VAR0;
2171
2172 unsigned num_elements = get_varying_type(var, stage->Stage)
2173 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2174 stage->Stage == MESA_SHADER_VERTEX);
2175 for (unsigned i = 0; i < num_elements; i++) {
2176 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2177 slots |= UINT64_C(1) << var_slot;
2178 var_slot += 1;
2179 }
2180 }
2181
2182 return slots;
2183 }
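
/* Example (illustrative): a producer output declared as
 *
 *    layout(location = 2) out vec4 v[2];
 *
 * has data.location == VARYING_SLOT_VAR0 + 2, so var_slot starts at 2.  The
 * array needs two attribute slots, so bits 2 and 3 are set and the function
 * returns a mask of 0xc for this variable.
 */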
2184
2185
2186 /**
2187 * Assign locations for all variables that are produced in one pipeline stage
2188 * (the "producer") and consumed in the next stage (the "consumer").
2189 *
2190 * Variables produced by the producer may also be consumed by transform
2191 * feedback.
2192 *
2193 * \param num_tfeedback_decls is the number of declarations indicating
2194 * variables that may be consumed by transform feedback.
2195 *
2196 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2197 * representing the result of parsing the strings passed to
2198 * glTransformFeedbackVaryings(). assign_location() will be called for
2199 * each of these objects that matches one of the outputs of the
2200 * producer.
2201 *
2202 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2203 * be NULL. In this case, varying locations are assigned solely based on the
2204 * requirements of transform feedback.
2205 */
2206 static bool
2207 assign_varying_locations(struct gl_context *ctx,
2208 void *mem_ctx,
2209 struct gl_shader_program *prog,
2210 gl_linked_shader *producer,
2211 gl_linked_shader *consumer,
2212 unsigned num_tfeedback_decls,
2213 tfeedback_decl *tfeedback_decls,
2214 const uint64_t reserved_slots)
2215 {
2216 /* Tessellation shaders treat inputs and outputs as shared memory and can
2217 * access inputs and outputs of other invocations.
2218 * Therefore, they can't be lowered to temps easily (and definitely not
2219 * efficiently).
2220 */
2221 bool unpackable_tess =
2222 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2223 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2224 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2225
2226 /* Transform feedback code assumes varying arrays are packed, so if the
2227 * driver has disabled varying packing, make sure to at least enable
2228 * packing required by transform feedback.
2229 */
2230 bool xfb_enabled =
2231 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2232
2233    /* Disable packing on outward-facing interfaces for SSO because in ES we
2234     * need to retain the unpacked varying information for draw-time
2235     * validation.
2236     *
2237     * Packing is still enabled on individual arrays, structs, and matrices as
2238     * these are required by the transform feedback code and it is still safe
2239     * to do so. We also enable packing when a varying is only used for
2240     * transform feedback and it's not an SSO.
2241     */
2242 bool disable_varying_packing =
2243 ctx->Const.DisableVaryingPacking || unpackable_tess;
2244 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2245 disable_varying_packing = true;
2246
2247 varying_matches matches(disable_varying_packing, xfb_enabled,
2248 ctx->Extensions.ARB_enhanced_layouts,
2249 producer ? producer->Stage : MESA_SHADER_NONE,
2250 consumer ? consumer->Stage : MESA_SHADER_NONE);
2251 hash_table *tfeedback_candidates =
2252 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2253 _mesa_key_string_equal);
2254 hash_table *consumer_inputs =
2255 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2256 _mesa_key_string_equal);
2257 hash_table *consumer_interface_inputs =
2258 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2259 _mesa_key_string_equal);
2260 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2261 NULL,
2262 };
2263
2264 unsigned consumer_vertices = 0;
2265 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2266 consumer_vertices = prog->Geom.VerticesIn;
2267
2268 /* Operate in a total of four passes.
2269 *
2270 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2271 * that inputs / outputs of separable shaders will be assigned
2272 * predictable locations regardless of the order in which declarations
2273 * appeared in the shader source.
2274 *
2275 * 2. Assign locations for any matching inputs and outputs.
2276 *
2277 * 3. Mark output variables in the producer that do not have locations as
2278 * not being outputs. This lets the optimizer eliminate them.
2279 *
2280 * 4. Mark input variables in the consumer that do not have locations as
2281 * not being inputs. This lets the optimizer eliminate them.
2282 */
2283 if (consumer)
2284 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2285
2286 if (producer)
2287 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2288
2289 if (consumer)
2290 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2291 consumer_inputs,
2292 consumer_interface_inputs,
2293 consumer_inputs_with_locations);
2294
2295 if (producer) {
2296 foreach_in_list(ir_instruction, node, producer->ir) {
2297 ir_variable *const output_var = node->as_variable();
2298
2299 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2300 continue;
2301
2302 /* Only geometry shaders can use non-zero streams */
2303 assert(output_var->data.stream == 0 ||
2304 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2305 producer->Stage == MESA_SHADER_GEOMETRY));
2306
2307 if (num_tfeedback_decls > 0) {
2308 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2309 g.process(output_var);
2310 }
2311
2312 ir_variable *const input_var =
2313 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2314 consumer_interface_inputs,
2315 consumer_inputs_with_locations);
2316
2317 /* If a matching input variable was found, add this output (and the
2318 * input) to the set. If this is a separable program and there is no
2319 * consumer stage, add the output.
2320 *
2321 * Always add TCS outputs. They are shared by all invocations
2322 * within a patch and can be used as shared memory.
2323 */
2324 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2325 producer->Stage == MESA_SHADER_TESS_CTRL) {
2326 matches.record(output_var, input_var);
2327 }
2328
2329 /* Only stream 0 outputs can be consumed in the next stage */
2330 if (input_var && output_var->data.stream != 0) {
2331 linker_error(prog, "output %s is assigned to stream=%d but "
2332 "is linked to an input, which requires stream=0",
2333 output_var->name, output_var->data.stream);
2334 return false;
2335 }
2336 }
2337 } else {
2338 /* If there's no producer stage, then this must be a separable program.
2339 * For example, we may have a program that has just a fragment shader.
2340 * Later this program will be used with some arbitrary vertex (or
2341 * geometry) shader program. This means that locations must be assigned
2342 * for all the inputs.
2343 */
2344 foreach_in_list(ir_instruction, node, consumer->ir) {
2345 ir_variable *const input_var = node->as_variable();
2346
2347 if (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2348 continue;
2349
2350 matches.record(NULL, input_var);
2351 }
2352 }
2353
2354 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2355 if (!tfeedback_decls[i].is_varying())
2356 continue;
2357
2358 const tfeedback_candidate *matched_candidate
2359 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2360
2361 if (matched_candidate == NULL) {
2362 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2363 return false;
2364 }
2365
2366 /* Mark xfb varyings as always active */
2367 matched_candidate->toplevel_var->data.always_active_io = 1;
2368
2369       /* Mark any corresponding inputs as always active as well. We must do
2370        * this because we have a NIR pass that lowers vectors to scalars and
2371        * another that removes unused varyings.
2372        * We don't split varyings marked as always active because there is no
2373        * point in doing so. This means we need to mark both sides of the
2374        * interface as always active; otherwise we will have a mismatch and
2375        * start removing things we shouldn't.
2376        */
2377 ir_variable *const input_var =
2378 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2379 consumer_inputs,
2380 consumer_interface_inputs,
2381 consumer_inputs_with_locations);
2382 if (input_var)
2383 input_var->data.always_active_io = 1;
2384
2385 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2386 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2387 matches.record(matched_candidate->toplevel_var, NULL);
2388 }
2389 }
2390
2391 _mesa_hash_table_destroy(consumer_inputs, NULL);
2392 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2393
2394 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2395 const unsigned slots_used = matches.assign_locations(
2396 prog, components, reserved_slots);
2397 matches.store_locations();
2398
2399 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2400 if (!tfeedback_decls[i].is_varying())
2401 continue;
2402
2403 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2404 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2405 return false;
2406 }
2407 }
2408 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2409
2410 if (consumer && producer) {
2411 foreach_in_list(ir_instruction, node, consumer->ir) {
2412 ir_variable *const var = node->as_variable();
2413
2414 if (var && var->data.mode == ir_var_shader_in &&
2415 var->data.is_unmatched_generic_inout) {
2416 if (!prog->IsES && prog->data->Version <= 120) {
2417 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2418 *
2419 * Only those varying variables used (i.e. read) in
2420 * the fragment shader executable must be written to
2421 * by the vertex shader executable; declaring
2422 * superfluous varying variables in a vertex shader is
2423 * permissible.
2424 *
2425 * We interpret this text as meaning that the VS must
2426 * write the variable for the FS to read it. See
2427 * "glsl1-varying read but not written" in piglit.
2428 */
2429 linker_error(prog, "%s shader varying %s not written "
2430                         "by %s shader.\n",
2431 _mesa_shader_stage_to_string(consumer->Stage),
2432 var->name,
2433 _mesa_shader_stage_to_string(producer->Stage));
2434 } else {
2435 linker_warning(prog, "%s shader varying %s not written "
2436                           "by %s shader.\n",
2437 _mesa_shader_stage_to_string(consumer->Stage),
2438 var->name,
2439 _mesa_shader_stage_to_string(producer->Stage));
2440 }
2441 }
2442 }
2443
2444       /* Now that validation is done it's safe to remove unused varyings. As
2445        * we have both a producer and a consumer, it's safe to remove unused
2446        * varyings even if the program is an SSO because the stages are being
2447        * linked together, i.e. we have a multi-stage SSO.
2448        */
2449 remove_unused_shader_inputs_and_outputs(false, producer,
2450 ir_var_shader_out);
2451 remove_unused_shader_inputs_and_outputs(false, consumer,
2452 ir_var_shader_in);
2453 }
2454
2455 if (producer) {
2456 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2457 0, producer, disable_varying_packing,
2458 xfb_enabled);
2459 }
2460
2461 if (consumer) {
2462 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2463 consumer_vertices, consumer,
2464 disable_varying_packing, xfb_enabled);
2465 }
2466
2467 return true;
2468 }
2469
2470 static bool
2471 check_against_output_limit(struct gl_context *ctx,
2472 struct gl_shader_program *prog,
2473 gl_linked_shader *producer,
2474 unsigned num_explicit_locations)
2475 {
2476 unsigned output_vectors = num_explicit_locations;
2477
2478 foreach_in_list(ir_instruction, node, producer->ir) {
2479 ir_variable *const var = node->as_variable();
2480
2481 if (var && !var->data.explicit_location &&
2482 var->data.mode == ir_var_shader_out &&
2483 var_counts_against_varying_limit(producer->Stage, var)) {
2484 /* outputs for fragment shader can't be doubles */
2485 output_vectors += var->type->count_attribute_slots(false);
2486 }
2487 }
2488
2489 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2490 unsigned max_output_components =
2491 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2492
2493 const unsigned output_components = output_vectors * 4;
2494 if (output_components > max_output_components) {
2495 if (ctx->API == API_OPENGLES2 || prog->IsES)
2496 linker_error(prog, "%s shader uses too many output vectors "
2497 "(%u > %u)\n",
2498 _mesa_shader_stage_to_string(producer->Stage),
2499 output_vectors,
2500 max_output_components / 4);
2501 else
2502 linker_error(prog, "%s shader uses too many output components "
2503 "(%u > %u)\n",
2504 _mesa_shader_stage_to_string(producer->Stage),
2505 output_components,
2506 max_output_components);
2507
2508 return false;
2509 }
2510
2511 return true;
2512 }
2513
2514 static bool
2515 check_against_input_limit(struct gl_context *ctx,
2516 struct gl_shader_program *prog,
2517 gl_linked_shader *consumer,
2518 unsigned num_explicit_locations)
2519 {
2520 unsigned input_vectors = num_explicit_locations;
2521
2522 foreach_in_list(ir_instruction, node, consumer->ir) {
2523 ir_variable *const var = node->as_variable();
2524
2525 if (var && !var->data.explicit_location &&
2526 var->data.mode == ir_var_shader_in &&
2527 var_counts_against_varying_limit(consumer->Stage, var)) {
2528 /* vertex inputs aren't varying counted */
2529 input_vectors += var->type->count_attribute_slots(false);
2530 }
2531 }
2532
2533 assert(consumer->Stage != MESA_SHADER_VERTEX);
2534 unsigned max_input_components =
2535 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2536
2537 const unsigned input_components = input_vectors * 4;
2538 if (input_components > max_input_components) {
2539 if (ctx->API == API_OPENGLES2 || prog->IsES)
2540 linker_error(prog, "%s shader uses too many input vectors "
2541 "(%u > %u)\n",
2542 _mesa_shader_stage_to_string(consumer->Stage),
2543 input_vectors,
2544 max_input_components / 4);
2545 else
2546 linker_error(prog, "%s shader uses too many input components "
2547 "(%u > %u)\n",
2548 _mesa_shader_stage_to_string(consumer->Stage),
2549 input_components,
2550 max_input_components);
2551
2552 return false;
2553 }
2554
2555 return true;
2556 }
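
/* Example of the limit arithmetic (numbers are illustrative): with
 * MaxInputComponents = 64 the consumer may use at most 64 / 4 = 16 input
 * vectors.  Varyings with explicit locations are pre-counted by the caller
 * (via _mesa_bitcount_64() of the reserved-slot mask) and passed in as
 * num_explicit_locations; the loop above only adds the implicitly located
 * varyings on top of that.
 */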
2557
2558 bool
2559 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2560 struct gl_context *ctx, void *mem_ctx)
2561 {
2562 bool has_xfb_qualifiers = false;
2563 unsigned num_tfeedback_decls = 0;
2564 char **varying_names = NULL;
2565 tfeedback_decl *tfeedback_decls = NULL;
2566
2567 /* From the ARB_enhanced_layouts spec:
2568 *
2569 * "If the shader used to record output variables for transform feedback
2570 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2571 * qualifiers, the values specified by TransformFeedbackVaryings are
2572 * ignored, and the set of variables captured for transform feedback is
2573 * instead derived from the specified layout qualifiers."
2574 */
2575 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2576 /* Find last stage before fragment shader */
2577 if (prog->_LinkedShaders[i]) {
2578 has_xfb_qualifiers =
2579 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2580 prog, &num_tfeedback_decls,
2581 &varying_names);
2582 break;
2583 }
2584 }
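
   /* For reference, the layout qualifiers referred to above look like this
    * in a hypothetical shader:
    *
    *    layout(xfb_buffer = 0, xfb_offset = 0) out vec4 pos;
    *    layout(xfb_buffer = 1, xfb_stride = 16) out;
    *
    * When any such qualifier is present, has_xfb_qualifiers is set and the
    * TransformFeedbackVaryings list handled below is ignored.
    */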
2585
2586 if (!has_xfb_qualifiers) {
2587 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2588 varying_names = prog->TransformFeedback.VaryingNames;
2589 }
2590
2591 if (num_tfeedback_decls != 0) {
2592 /* From GL_EXT_transform_feedback:
2593 * A program will fail to link if:
2594 *
2595 * * the <count> specified by TransformFeedbackVaryingsEXT is
2596 * non-zero, but the program object has no vertex or geometry
2597 * shader;
2598 */
2599 if (first >= MESA_SHADER_FRAGMENT) {
2600 linker_error(prog, "Transform feedback varyings specified, but "
2601 "no vertex, tessellation, or geometry shader is "
2602 "present.\n");
2603 return false;
2604 }
2605
2606 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2607 num_tfeedback_decls);
2608 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2609 varying_names, tfeedback_decls))
2610 return false;
2611 }
2612
2613    /* If there is no fragment shader we still need to set up transform feedback.
2614     *
2615     * For SSO we also need to assign output locations. We assign them here
2616     * because we need to do it for both single-stage and multi-stage
2617     * programs.
2618     */
2619 if (last < MESA_SHADER_FRAGMENT &&
2620 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2621 const uint64_t reserved_out_slots =
2622 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2623 if (!assign_varying_locations(ctx, mem_ctx, prog,
2624 prog->_LinkedShaders[last], NULL,
2625 num_tfeedback_decls, tfeedback_decls,
2626 reserved_out_slots))
2627 return false;
2628 }
2629
2630 if (last <= MESA_SHADER_FRAGMENT) {
2631 /* Remove unused varyings from the first/last stage unless SSO */
2632 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2633 prog->_LinkedShaders[first],
2634 ir_var_shader_in);
2635 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2636 prog->_LinkedShaders[last],
2637 ir_var_shader_out);
2638
2639 /* If the program is made up of only a single stage */
2640 if (first == last) {
2641 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2642
2643 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2644 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2645 tfeedback_decls);
2646
2647 if (prog->SeparateShader) {
2648 const uint64_t reserved_slots =
2649 reserved_varying_slot(sh, ir_var_shader_in);
2650
2651 /* Assign input locations for SSO, output locations are already
2652 * assigned.
2653 */
2654 if (!assign_varying_locations(ctx, mem_ctx, prog,
2655 NULL /* producer */,
2656 sh /* consumer */,
2657 0 /* num_tfeedback_decls */,
2658 NULL /* tfeedback_decls */,
2659 reserved_slots))
2660 return false;
2661 }
2662 } else {
2663 /* Linking the stages in the opposite order (from fragment to vertex)
2664 * ensures that inter-shader outputs written to in an earlier stage
2665 * are eliminated if they are (transitively) not used in a later
2666 * stage.
2667 */
2668 int next = last;
2669 for (int i = next - 1; i >= 0; i--) {
2670 if (prog->_LinkedShaders[i] == NULL && i != 0)
2671 continue;
2672
2673 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2674 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2675
2676 const uint64_t reserved_out_slots =
2677 reserved_varying_slot(sh_i, ir_var_shader_out);
2678 const uint64_t reserved_in_slots =
2679 reserved_varying_slot(sh_next, ir_var_shader_in);
2680
2681 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2682 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2683 tfeedback_decls);
2684
2685 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2686 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2687 tfeedback_decls,
2688 reserved_out_slots | reserved_in_slots))
2689 return false;
2690
2691 /* This must be done after all dead varyings are eliminated. */
2692 if (sh_i != NULL) {
2693 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2694 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2695 return false;
2696 }
2697 }
2698
2699 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2700 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2701 return false;
2702
2703 next = i;
2704 }
2705 }
2706 }
2707
2708 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2709 has_xfb_qualifiers))
2710 return false;
2711
2712 return true;
2713 }