glsl/linker: outputs in the same location must share interpolation
[mesa.git] / src / compiler / glsl / link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/mtypes.h"
33 #include "glsl_symbol_table.h"
34 #include "glsl_parser_extras.h"
35 #include "ir_optimization.h"
36 #include "linker.h"
37 #include "link_varyings.h"
38 #include "main/macros.h"
39 #include "util/hash_table.h"
40 #include "program.h"
41
42
43 /**
44 * Get the varying type stripped of the outermost array if we're processing
45 * a stage whose varyings are arrays indexed by a vertex number (such as
46 * geometry shader inputs).
47 */
48 static const glsl_type *
49 get_varying_type(const ir_variable *var, gl_shader_stage stage)
50 {
51 const glsl_type *type = var->type;
52
53 if (!var->data.patch &&
54 ((var->data.mode == ir_var_shader_out &&
55 stage == MESA_SHADER_TESS_CTRL) ||
56 (var->data.mode == ir_var_shader_in &&
57 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
58 stage == MESA_SHADER_GEOMETRY)))) {
59 assert(type->is_array());
60 type = type->fields.array;
61 }
62
63 return type;
64 }
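/* For illustration, a hypothetical geometry shader input declared as
 *
 *    in vec4 color[];   // per-vertex array, sized by the input primitive
 *
 * has an ir_variable type that is an array of vec4; get_varying_type()
 * returns vec4, the per-vertex element type the varying linking code wants
 * to reason about.
 */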
65
66 static void
67 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
68 size_t name_length, unsigned *count,
69 const char *ifc_member_name,
70 const glsl_type *ifc_member_t, char ***varying_names)
71 {
72 if (t->is_interface()) {
73 size_t new_length = name_length;
74
75 assert(ifc_member_name && ifc_member_t);
76 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
77
78 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
79 NULL, NULL, varying_names);
80 } else if (t->is_record()) {
81 for (unsigned i = 0; i < t->length; i++) {
82 const char *field = t->fields.structure[i].name;
83 size_t new_length = name_length;
84
85 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
86
87 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
88 new_length, count, NULL, NULL,
89 varying_names);
90 }
91 } else if (t->without_array()->is_record() ||
92 t->without_array()->is_interface() ||
93 (t->is_array() && t->fields.array->is_array())) {
94 for (unsigned i = 0; i < t->length; i++) {
95 size_t new_length = name_length;
96
97 /* Append the subscript to the current variable name */
98 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
99
100 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
101 count, ifc_member_name, ifc_member_t,
102 varying_names);
103 }
104 } else {
105 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
106 }
107 }
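/* A sketch of the naming this produces, for a hypothetical output:
 *
 *    struct S { vec4 a; vec2 b; };
 *    layout(xfb_offset = 0) out S s[2];
 *
 * The generated transform feedback names are "s[0].a", "s[0].b", "s[1].a"
 * and "s[1].b": arrays of structs get a subscript per element and a suffix
 * per field, while arrays of basic types are recorded under a single name.
 */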
108
109 static bool
110 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
111 struct gl_shader_program *prog,
112 unsigned *num_tfeedback_decls,
113 char ***varying_names)
114 {
115 bool has_xfb_qualifiers = false;
116
117 /* We still need to enable transform feedback mode even if xfb_stride is
118 * only applied to a global out. Also we don't bother to propagate
119     * xfb_stride to interface block members, so this will catch that case as well.
120 */
121 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
122 if (prog->TransformFeedback.BufferStride[j]) {
123 has_xfb_qualifiers = true;
124 break;
125 }
126 }
127
128 foreach_in_list(ir_instruction, node, sh->ir) {
129 ir_variable *var = node->as_variable();
130 if (!var || var->data.mode != ir_var_shader_out)
131 continue;
132
133 /* From the ARB_enhanced_layouts spec:
134 *
135 * "Any shader making any static use (after preprocessing) of any of
136 * these *xfb_* qualifiers will cause the shader to be in a
137 * transform feedback capturing mode and hence responsible for
138 * describing the transform feedback setup. This mode will capture
139 * any output selected by *xfb_offset*, directly or indirectly, to
140 * a transform feedback buffer."
141 */
142 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
143 has_xfb_qualifiers = true;
144 }
145
146 if (var->data.explicit_xfb_offset) {
147 *num_tfeedback_decls += var->type->varying_count();
148 has_xfb_qualifiers = true;
149 }
150 }
151
152 if (*num_tfeedback_decls == 0)
153 return has_xfb_qualifiers;
154
155 unsigned i = 0;
156 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
157 foreach_in_list(ir_instruction, node, sh->ir) {
158 ir_variable *var = node->as_variable();
159 if (!var || var->data.mode != ir_var_shader_out)
160 continue;
161
162 if (var->data.explicit_xfb_offset) {
163 char *name;
164 const glsl_type *type, *member_type;
165
166 if (var->data.from_named_ifc_block) {
167 type = var->get_interface_type();
168 /* Find the member type before it was altered by lowering */
169 member_type =
170 type->fields.structure[type->field_index(var->name)].type;
171 name = ralloc_strdup(NULL, type->without_array()->name);
172 } else {
173 type = var->type;
174 member_type = NULL;
175 name = ralloc_strdup(NULL, var->name);
176 }
177 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
178 var->name, member_type, varying_names);
179 ralloc_free(name);
180 }
181 }
182
183 assert(i == *num_tfeedback_decls);
184 return has_xfb_qualifiers;
185 }
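/* A minimal sketch of the qualifiers this function reacts to, in a
 * hypothetical shader:
 *
 *    layout(xfb_buffer = 1, xfb_stride = 32) out;            // global stride
 *    layout(xfb_buffer = 0, xfb_offset = 0) out vec4 color;
 *
 * The global xfb_stride alone is enough to put the program in transform
 * feedback capturing mode, while the xfb_offset declaration additionally
 * contributes one name (a vec4 counts as a single varying) to the
 * varying_names array built here.
 */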
186
187 /**
188 * Validate the types and qualifiers of an output from one stage against the
189 * matching input to another stage.
190 */
191 static void
192 cross_validate_types_and_qualifiers(struct gl_shader_program *prog,
193 const ir_variable *input,
194 const ir_variable *output,
195 gl_shader_stage consumer_stage,
196 gl_shader_stage producer_stage)
197 {
198 /* Check that the types match between stages.
199 */
200 const glsl_type *type_to_match = input->type;
201
202 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
203 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
204 consumer_stage != MESA_SHADER_FRAGMENT) ||
205 consumer_stage == MESA_SHADER_GEOMETRY;
206 if (extra_array_level) {
207 assert(type_to_match->is_array());
208 type_to_match = type_to_match->fields.array;
209 }
210
211 if (type_to_match != output->type) {
212 /* There is a bit of a special case for gl_TexCoord. This
213    * built-in is unsized by default. Applications that access it
214    * with a variable index must redeclare it with a size. There is
215    * some language in the GLSL spec that implies the fragment shader
216    * and vertex shader do not have to agree on this size. Other
217    * drivers behave this way, and one or two applications seem to
218 * rely on it.
219 *
220 * Neither declaration needs to be modified here because the array
221 * sizes are fixed later when update_array_sizes is called.
222 *
223 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
224 *
225 * "Unlike user-defined varying variables, the built-in
226 * varying variables don't have a strict one-to-one
227 * correspondence between the vertex language and the
228 * fragment language."
229 */
230 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
231 linker_error(prog,
232 "%s shader output `%s' declared as type `%s', "
233 "but %s shader input declared as type `%s'\n",
234 _mesa_shader_stage_to_string(producer_stage),
235 output->name,
236 output->type->name,
237 _mesa_shader_stage_to_string(consumer_stage),
238 input->type->name);
239 return;
240 }
241 }
242
243 /* Check that all of the qualifiers match between stages.
244 */
245
246 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
247 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
248 * conformance test suite does not verify that the qualifiers must match.
249 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
250 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
251 */
252 if (false /* always skip the centroid check */ &&
253 prog->data->Version < (prog->IsES ? 310 : 430) &&
254 input->data.centroid != output->data.centroid) {
255 linker_error(prog,
256 "%s shader output `%s' %s centroid qualifier, "
257 "but %s shader input %s centroid qualifier\n",
258 _mesa_shader_stage_to_string(producer_stage),
259 output->name,
260 (output->data.centroid) ? "has" : "lacks",
261 _mesa_shader_stage_to_string(consumer_stage),
262 (input->data.centroid) ? "has" : "lacks");
263 return;
264 }
265
266 if (input->data.sample != output->data.sample) {
267 linker_error(prog,
268 "%s shader output `%s' %s sample qualifier, "
269 "but %s shader input %s sample qualifier\n",
270 _mesa_shader_stage_to_string(producer_stage),
271 output->name,
272 (output->data.sample) ? "has" : "lacks",
273 _mesa_shader_stage_to_string(consumer_stage),
274 (input->data.sample) ? "has" : "lacks");
275 return;
276 }
277
278 if (input->data.patch != output->data.patch) {
279 linker_error(prog,
280 "%s shader output `%s' %s patch qualifier, "
281 "but %s shader input %s patch qualifier\n",
282 _mesa_shader_stage_to_string(producer_stage),
283 output->name,
284 (output->data.patch) ? "has" : "lacks",
285 _mesa_shader_stage_to_string(consumer_stage),
286 (input->data.patch) ? "has" : "lacks");
287 return;
288 }
289
290 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
291 *
292 * "As only outputs need be declared with invariant, an output from
293 * one shader stage will still match an input of a subsequent stage
294 * without the input being declared as invariant."
295 *
296 * while GLSL 4.20 says:
297 *
298 * "For variables leaving one shader and coming into another shader,
299 * the invariant keyword has to be used in both shaders, or a link
300 * error will result."
301 *
302 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
303 *
304 * "The invariance of varyings that are declared in both the vertex
305 * and fragment shaders must match."
306 */
307 if (input->data.invariant != output->data.invariant &&
308 prog->data->Version < (prog->IsES ? 300 : 430)) {
309 linker_error(prog,
310 "%s shader output `%s' %s invariant qualifier, "
311 "but %s shader input %s invariant qualifier\n",
312 _mesa_shader_stage_to_string(producer_stage),
313 output->name,
314 (output->data.invariant) ? "has" : "lacks",
315 _mesa_shader_stage_to_string(consumer_stage),
316 (input->data.invariant) ? "has" : "lacks");
317 return;
318 }
319
320    /* GLSL >= 4.40 removes the text requiring interpolation qualifiers
321     * to match across stages; they need only match within the same stage.
322 *
323 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
324 *
325 * "It is a link-time error if, within the same stage, the interpolation
326     *    qualifiers of variables of the same name do not match."
327 *
328 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
329 *
330 * "When no interpolation qualifier is present, smooth interpolation
331 * is used."
332 *
333 * So we match variables where one is smooth and the other has no explicit
334 * qualifier.
335 */
336 unsigned input_interpolation = input->data.interpolation;
337 unsigned output_interpolation = output->data.interpolation;
338 if (prog->IsES) {
339 if (input_interpolation == INTERP_MODE_NONE)
340 input_interpolation = INTERP_MODE_SMOOTH;
341 if (output_interpolation == INTERP_MODE_NONE)
342 output_interpolation = INTERP_MODE_SMOOTH;
343 }
344 if (input_interpolation != output_interpolation &&
345 prog->data->Version < 440) {
346 linker_error(prog,
347 "%s shader output `%s' specifies %s "
348 "interpolation qualifier, "
349 "but %s shader input specifies %s "
350 "interpolation qualifier\n",
351 _mesa_shader_stage_to_string(producer_stage),
352 output->name,
353 interpolation_string(output->data.interpolation),
354 _mesa_shader_stage_to_string(consumer_stage),
355 interpolation_string(input->data.interpolation));
356 return;
357 }
358 }
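/* For illustration, a hypothetical pair of stages that fails this check:
 *
 *    // vertex shader
 *    out vec3 color;
 *    // fragment shader
 *    in vec4 color;     // link error: types differ
 *
 * By contrast, an interpolation mismatch (e.g. only the fragment input is
 * declared flat) is only an error for versions below GLSL 4.40, and for ES
 * a missing qualifier is treated as smooth before the comparison.
 */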
359
360 /**
361 * Validate front and back color outputs against single color input
362 */
363 static void
364 cross_validate_front_and_back_color(struct gl_shader_program *prog,
365 const ir_variable *input,
366 const ir_variable *front_color,
367 const ir_variable *back_color,
368 gl_shader_stage consumer_stage,
369 gl_shader_stage producer_stage)
370 {
371 if (front_color != NULL && front_color->data.assigned)
372 cross_validate_types_and_qualifiers(prog, input, front_color,
373 consumer_stage, producer_stage);
374
375 if (back_color != NULL && back_color->data.assigned)
376 cross_validate_types_and_qualifiers(prog, input, back_color,
377 consumer_stage, producer_stage);
378 }
379
380 static unsigned
381 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
382 {
383 unsigned location_start = VARYING_SLOT_VAR0;
384
385 switch (stage) {
386 case MESA_SHADER_VERTEX:
387 if (var->data.mode == ir_var_shader_in)
388 location_start = VERT_ATTRIB_GENERIC0;
389 break;
390 case MESA_SHADER_TESS_CTRL:
391 case MESA_SHADER_TESS_EVAL:
392 if (var->data.patch)
393 location_start = VARYING_SLOT_PATCH0;
394 break;
395 case MESA_SHADER_FRAGMENT:
396 if (var->data.mode == ir_var_shader_out)
397 location_start = FRAG_RESULT_DATA0;
398 break;
399 default:
400 break;
401 }
402
403 return var->data.location - location_start;
404 }
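/* For example, with hypothetical declarations: a fragment shader output
 * using layout(location = 2) has data.location == FRAG_RESULT_DATA0 + 2 and
 * yields 2 here, and a vertex shader input at location 3 is counted from
 * VERT_ATTRIB_GENERIC0 and yields 3. The result is used as an index into
 * the per-stage explicit location tables below.
 */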
405
406 struct explicit_location_info {
407 ir_variable *var;
408 unsigned base_type;
409 unsigned interpolation;
410 };
411
412 static bool
413 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
414 ir_variable *var,
415 unsigned location,
416 unsigned component,
417 unsigned location_limit,
418 const glsl_type *type,
419 unsigned interpolation,
420 gl_shader_program *prog,
421 gl_shader_stage stage)
422 {
423 unsigned last_comp;
424 if (type->without_array()->is_record()) {
425 /* The component qualifier can't be used on structs so just treat
426 * all component slots as used.
427 */
428 last_comp = 4;
429 } else {
430 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
431 last_comp = component + type->without_array()->vector_elements * dmul;
432 }
433
434 while (location < location_limit) {
435 unsigned i = component;
436
437 /* If there are other outputs assigned to the same location
438 * they must have the same interpolation
439 */
440 unsigned comp = 0;
441 while (comp < 4) {
442             /* Skip the components used by this output; we only care about
443              * other outputs in the same location.
444 */
445 if (comp == i) {
446 comp = last_comp;
447 continue;
448 }
449
450 struct explicit_location_info *info =
451 &explicit_locations[location][comp];
452
453 if (info->var) {
454 if (info->interpolation != interpolation) {
455 linker_error(prog,
456 "%s shader has multiple outputs at explicit "
457 "location %u with different interpolation "
458 "settings\n",
459 _mesa_shader_stage_to_string(stage), location);
460 return false;
461 }
462 }
463
464 comp++;
465 }
466
467 /* Component aliasing is not allowed */
468 while (i < last_comp) {
469 if (explicit_locations[location][i].var != NULL) {
470 linker_error(prog,
471 "%s shader has multiple outputs explicitly "
472 "assigned to location %d and component %d\n",
473 _mesa_shader_stage_to_string(stage),
474 location, component);
475 return false;
476 }
477
478          /* Make sure all components at this location have the same type.
479 */
480 for (unsigned j = 0; j < 4; j++) {
481 if (explicit_locations[location][j].var &&
482 explicit_locations[location][j].base_type !=
483 type->without_array()->base_type) {
484 linker_error(prog,
485 "Varyings sharing the same location must "
486 "have the same underlying numerical type. "
487 "Location %u component %u\n", location, component);
488 return false;
489 }
490 }
491
492 explicit_locations[location][i].var = var;
493 explicit_locations[location][i].base_type =
494 type->without_array()->base_type;
495 explicit_locations[location][i].interpolation = interpolation;
496 i++;
497
498 /* We need to do some special handling for doubles as dvec3 and
499 * dvec4 consume two consecutive locations. We don't need to
500 * worry about components beginning at anything other than 0 as
501 * the spec does not allow this for dvec3 and dvec4.
502 */
503 if (i == 4 && last_comp > 4) {
504 last_comp = last_comp - 4;
505 /* Bump location index and reset the component index */
506 location++;
507 i = 0;
508 }
509 }
510
511 location++;
512 }
513
514 return true;
515 }
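/* A sketch of what this enforces, for hypothetical vertex shader outputs:
 *
 *    layout(location = 0, component = 0) out float a;
 *    layout(location = 0, component = 1) flat out float b;
 *
 * Sharing location 0 is only allowed if a and b agree on interpolation and
 * on the underlying numerical base type, so the smooth/flat mismatch above
 * is a link error; two outputs claiming the same component would be one too.
 */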
516
517 /**
518 * Validate that outputs from one stage match inputs of another
519 */
520 void
521 cross_validate_outputs_to_inputs(struct gl_context *ctx,
522 struct gl_shader_program *prog,
523 gl_linked_shader *producer,
524 gl_linked_shader *consumer)
525 {
526 glsl_symbol_table parameters;
527 struct explicit_location_info explicit_locations[MAX_VARYING][4] = { 0 };
528
529 /* Find all shader outputs in the "producer" stage.
530 */
531 foreach_in_list(ir_instruction, node, producer->ir) {
532 ir_variable *const var = node->as_variable();
533
534 if (var == NULL || var->data.mode != ir_var_shader_out)
535 continue;
536
537 if (!var->data.explicit_location
538 || var->data.location < VARYING_SLOT_VAR0)
539 parameters.add_variable(var);
540 else {
541 /* User-defined varyings with explicit locations are handled
542 * differently because they do not need to have matching names.
543 */
544 const glsl_type *type = get_varying_type(var, producer->Stage);
545 unsigned num_elements = type->count_attribute_slots(false);
546 unsigned idx = compute_variable_location_slot(var, producer->Stage);
547 unsigned slot_limit = idx + num_elements;
548
549 unsigned slot_max =
550 ctx->Const.Program[producer->Stage].MaxOutputComponents / 4;
551 if (slot_limit > slot_max) {
552 linker_error(prog,
553 "Invalid location %u in %s shader\n",
554 idx, _mesa_shader_stage_to_string(producer->Stage));
555 return;
556 }
557
558 if (type->without_array()->is_interface()) {
559 for (unsigned i = 0; i < type->without_array()->length; i++) {
560 const glsl_type *field_type = type->fields.structure[i].type;
561 unsigned field_location = type->fields.structure[i].location -
562 (type->fields.structure[i].patch ? VARYING_SLOT_PATCH0 :
563 VARYING_SLOT_VAR0);
564 unsigned interpolation = type->fields.structure[i].interpolation;
565 if (!check_location_aliasing(explicit_locations, var,
566 field_location,
567 0, field_location + 1,
568 field_type, interpolation,
569 prog, producer->Stage)) {
570 return;
571 }
572 }
573 } else if (!check_location_aliasing(explicit_locations, var,
574 idx, var->data.location_frac,
575 slot_limit, type,
576 var->data.interpolation,
577 prog, producer->Stage)) {
578 return;
579 }
580 }
581 }
582
583
584 /* Find all shader inputs in the "consumer" stage. Any variables that have
585 * matching outputs already in the symbol table must have the same type and
586 * qualifiers.
587 *
588 * Exception: if the consumer is the geometry shader, then the inputs
589 * should be arrays and the type of the array element should match the type
590 * of the corresponding producer output.
591 */
592 foreach_in_list(ir_instruction, node, consumer->ir) {
593 ir_variable *const input = node->as_variable();
594
595 if (input == NULL || input->data.mode != ir_var_shader_in)
596 continue;
597
598 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
599 const ir_variable *const front_color =
600 parameters.get_variable("gl_FrontColor");
601
602 const ir_variable *const back_color =
603 parameters.get_variable("gl_BackColor");
604
605 cross_validate_front_and_back_color(prog, input,
606 front_color, back_color,
607 consumer->Stage, producer->Stage);
608 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
609 const ir_variable *const front_color =
610 parameters.get_variable("gl_FrontSecondaryColor");
611
612 const ir_variable *const back_color =
613 parameters.get_variable("gl_BackSecondaryColor");
614
615 cross_validate_front_and_back_color(prog, input,
616 front_color, back_color,
617 consumer->Stage, producer->Stage);
618 } else {
619 /* The rules for connecting inputs and outputs change in the presence
620 * of explicit locations. In this case, we no longer care about the
621 * names of the variables. Instead, we care only about the
622 * explicitly assigned location.
623 */
624 ir_variable *output = NULL;
625 if (input->data.explicit_location
626 && input->data.location >= VARYING_SLOT_VAR0) {
627
628 const glsl_type *type = get_varying_type(input, consumer->Stage);
629 unsigned num_elements = type->count_attribute_slots(false);
630 unsigned idx =
631 compute_variable_location_slot(input, consumer->Stage);
632 unsigned slot_limit = idx + num_elements;
633
634 while (idx < slot_limit) {
635 if (idx >= MAX_VARYING) {
636 linker_error(prog,
637 "Invalid location %u in %s shader\n", idx,
638 _mesa_shader_stage_to_string(consumer->Stage));
639 return;
640 }
641
642 output = explicit_locations[idx][input->data.location_frac].var;
643
644 if (output == NULL ||
645 input->data.location != output->data.location) {
646 linker_error(prog,
647 "%s shader input `%s' with explicit location "
648 "has no matching output\n",
649 _mesa_shader_stage_to_string(consumer->Stage),
650 input->name);
651 break;
652 }
653 idx++;
654 }
655 } else {
656 output = parameters.get_variable(input->name);
657 }
658
659 if (output != NULL) {
660 /* Interface blocks have their own validation elsewhere so don't
661 * try validating them here.
662 */
663 if (!(input->get_interface_type() &&
664 output->get_interface_type()))
665 cross_validate_types_and_qualifiers(prog, input, output,
666 consumer->Stage,
667 producer->Stage);
668 } else {
669          /* Check for input vars with unmatched output vars in the previous
670           * stage, taking into account that interface blocks could have a
671           * matching output with a different name, so we ignore them.
672 */
673 assert(!input->data.assigned);
674 if (input->data.used && !input->get_interface_type() &&
675 !input->data.explicit_location && !prog->SeparateShader)
676 linker_error(prog,
677 "%s shader input `%s' "
678 "has no matching output in the previous stage\n",
679 _mesa_shader_stage_to_string(consumer->Stage),
680 input->name);
681 }
682 }
683 }
684 }
685
686 /**
687 * Demote shader inputs and outputs that are not used in other stages, and
688 * remove them via dead code elimination.
689 */
690 static void
691 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
692 gl_linked_shader *sh,
693 enum ir_variable_mode mode)
694 {
695 if (is_separate_shader_object)
696 return;
697
698 foreach_in_list(ir_instruction, node, sh->ir) {
699 ir_variable *const var = node->as_variable();
700
701 if (var == NULL || var->data.mode != int(mode))
702 continue;
703
704 /* A shader 'in' or 'out' variable is only really an input or output if
705 * its value is used by other shader stages. This will cause the
706 * variable to have a location assigned.
707 */
708 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
709 assert(var->data.mode != ir_var_temporary);
710
711 /* Assign zeros to demoted inputs to allow more optimizations. */
712 if (var->data.mode == ir_var_shader_in && !var->constant_value)
713 var->constant_value = ir_constant::zero(var, var->type);
714
715 var->data.mode = ir_var_auto;
716 }
717 }
718
719 /* Eliminate code that is now dead due to unused inputs/outputs being
720 * demoted.
721 */
722 while (do_dead_code(sh->ir, false))
723 ;
724
725 }
726
727 /**
728 * Initialize this object based on a string that was passed to
729 * glTransformFeedbackVaryings.
730 *
731 * If the input is mal-formed, this call still succeeds, but it sets
732 * this->var_name to a mal-formed input, so tfeedback_decl::find_output_var()
733 * will fail to find any matching variable.
734 */
735 void
736 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
737 const char *input)
738 {
739 /* We don't have to be pedantic about what is a valid GLSL variable name,
740 * because any variable with an invalid name can't exist in the IR anyway.
741 */
742
743 this->location = -1;
744 this->orig_name = input;
745 this->lowered_builtin_array_variable = none;
746 this->skip_components = 0;
747 this->next_buffer_separator = false;
748 this->matched_candidate = NULL;
749 this->stream_id = 0;
750 this->buffer = 0;
751 this->offset = 0;
752
753 if (ctx->Extensions.ARB_transform_feedback3) {
754 /* Parse gl_NextBuffer. */
755 if (strcmp(input, "gl_NextBuffer") == 0) {
756 this->next_buffer_separator = true;
757 return;
758 }
759
760 /* Parse gl_SkipComponents. */
761 if (strcmp(input, "gl_SkipComponents1") == 0)
762 this->skip_components = 1;
763 else if (strcmp(input, "gl_SkipComponents2") == 0)
764 this->skip_components = 2;
765 else if (strcmp(input, "gl_SkipComponents3") == 0)
766 this->skip_components = 3;
767 else if (strcmp(input, "gl_SkipComponents4") == 0)
768 this->skip_components = 4;
769
770 if (this->skip_components)
771 return;
772 }
773
774 /* Parse a declaration. */
775 const char *base_name_end;
776 long subscript = parse_program_resource_name(input, &base_name_end);
777 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
778 if (this->var_name == NULL) {
779 _mesa_error_no_memory(__func__);
780 return;
781 }
782
783 if (subscript >= 0) {
784 this->array_subscript = subscript;
785 this->is_subscripted = true;
786 } else {
787 this->is_subscripted = false;
788 }
789
790 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
791 * class must behave specially to account for the fact that gl_ClipDistance
792 * is converted from a float[8] to a vec4[2].
793 */
794 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
795 strcmp(this->var_name, "gl_ClipDistance") == 0) {
796 this->lowered_builtin_array_variable = clip_distance;
797 }
798 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
799 strcmp(this->var_name, "gl_CullDistance") == 0) {
800 this->lowered_builtin_array_variable = cull_distance;
801 }
802
803 if (ctx->Const.LowerTessLevel &&
804 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
805 this->lowered_builtin_array_variable = tess_level_outer;
806 if (ctx->Const.LowerTessLevel &&
807 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
808 this->lowered_builtin_array_variable = tess_level_inner;
809 }
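/* Examples of strings accepted from glTransformFeedbackVaryings(), with
 * hypothetical user variable names:
 *
 *    "gl_NextBuffer"       -> next_buffer_separator (ARB_transform_feedback3)
 *    "gl_SkipComponents2"  -> skip_components = 2
 *    "outData"             -> var_name "outData", not subscripted
 *    "outData[1]"          -> var_name "outData", array_subscript = 1
 *    "gl_ClipDistance"     -> may be redirected to the lowered
 *                             gl_ClipDistanceMESA array, depending on the
 *                             driver's compiler options
 */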
810
811
812 /**
813 * Determine whether two tfeedback_decl objects refer to the same variable and
814 * array index (if applicable).
815 */
816 bool
817 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
818 {
819 assert(x.is_varying() && y.is_varying());
820
821 if (strcmp(x.var_name, y.var_name) != 0)
822 return false;
823 if (x.is_subscripted != y.is_subscripted)
824 return false;
825 if (x.is_subscripted && x.array_subscript != y.array_subscript)
826 return false;
827 return true;
828 }
829
830
831 /**
832 * Assign a location and stream ID for this tfeedback_decl object based on the
833 * transform feedback candidate found by find_candidate.
834 *
835 * If an error occurs, the error is reported through linker_error() and false
836 * is returned.
837 */
838 bool
839 tfeedback_decl::assign_location(struct gl_context *ctx,
840 struct gl_shader_program *prog)
841 {
842 assert(this->is_varying());
843
844 unsigned fine_location
845 = this->matched_candidate->toplevel_var->data.location * 4
846 + this->matched_candidate->toplevel_var->data.location_frac
847 + this->matched_candidate->offset;
848 const unsigned dmul =
849 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
850
851 if (this->matched_candidate->type->is_array()) {
852 /* Array variable */
853 const unsigned matrix_cols =
854 this->matched_candidate->type->fields.array->matrix_columns;
855 const unsigned vector_elements =
856 this->matched_candidate->type->fields.array->vector_elements;
857 unsigned actual_array_size;
858 switch (this->lowered_builtin_array_variable) {
859 case clip_distance:
860 actual_array_size = prog->last_vert_prog ?
861 prog->last_vert_prog->info.clip_distance_array_size : 0;
862 break;
863 case cull_distance:
864 actual_array_size = prog->last_vert_prog ?
865 prog->last_vert_prog->info.cull_distance_array_size : 0;
866 break;
867 case tess_level_outer:
868 actual_array_size = 4;
869 break;
870 case tess_level_inner:
871 actual_array_size = 2;
872 break;
873 case none:
874 default:
875 actual_array_size = this->matched_candidate->type->array_size();
876 break;
877 }
878
879 if (this->is_subscripted) {
880 /* Check array bounds. */
881 if (this->array_subscript >= actual_array_size) {
882 linker_error(prog, "Transform feedback varying %s has index "
883 "%i, but the array size is %u.",
884 this->orig_name, this->array_subscript,
885 actual_array_size);
886 return false;
887 }
888 unsigned array_elem_size = this->lowered_builtin_array_variable ?
889 1 : vector_elements * matrix_cols * dmul;
890 fine_location += array_elem_size * this->array_subscript;
891 this->size = 1;
892 } else {
893 this->size = actual_array_size;
894 }
895 this->vector_elements = vector_elements;
896 this->matrix_columns = matrix_cols;
897 if (this->lowered_builtin_array_variable)
898 this->type = GL_FLOAT;
899 else
900 this->type = this->matched_candidate->type->fields.array->gl_type;
901 } else {
902 /* Regular variable (scalar, vector, or matrix) */
903 if (this->is_subscripted) {
904 linker_error(prog, "Transform feedback varying %s requested, "
905 "but %s is not an array.",
906 this->orig_name, this->var_name);
907 return false;
908 }
909 this->size = 1;
910 this->vector_elements = this->matched_candidate->type->vector_elements;
911 this->matrix_columns = this->matched_candidate->type->matrix_columns;
912 this->type = this->matched_candidate->type->gl_type;
913 }
914 this->location = fine_location / 4;
915 this->location_frac = fine_location % 4;
916
917 /* From GL_EXT_transform_feedback:
918 * A program will fail to link if:
919 *
920 * * the total number of components to capture in any varying
921 * variable in <varyings> is greater than the constant
922 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
923 * buffer mode is SEPARATE_ATTRIBS_EXT;
924 */
925 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
926 this->num_components() >
927 ctx->Const.MaxTransformFeedbackSeparateComponents) {
928 linker_error(prog, "Transform feedback varying %s exceeds "
929 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
930 this->orig_name);
931 return false;
932 }
933
934 /* Only transform feedback varyings can be assigned to non-zero streams,
935 * so assign the stream id here.
936 */
937 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
938
939 unsigned array_offset = this->array_subscript * 4 * dmul;
940 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
941 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
942 this->offset = this->matched_candidate->toplevel_var->data.offset +
943 array_offset + struct_offset;
944
945 return true;
946 }
947
948
949 unsigned
950 tfeedback_decl::get_num_outputs() const
951 {
952 if (!this->is_varying()) {
953 return 0;
954 }
955 return (this->num_components() + this->location_frac + 3)/4;
956 }
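/* A small worked example of the arithmetic above, for a hypothetical vec3
 * candidate with toplevel location L, location_frac 1 and a candidate offset
 * of 2: fine_location = 4 * L + 3, giving location = L and location_frac = 3.
 * Its 3 components then straddle two slots, so get_num_outputs() returns
 * (3 + 3 + 3) / 4 = 2.
 */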
957
958
959 /**
960 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
961 *
962 * If an error occurs, the error is reported through linker_error() and false
963 * is returned.
964 */
965 bool
966 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
967 struct gl_transform_feedback_info *info,
968 unsigned buffer, unsigned buffer_index,
969 const unsigned max_outputs, bool *explicit_stride,
970 bool has_xfb_qualifiers) const
971 {
972 unsigned xfb_offset = 0;
973 unsigned size = this->size;
974 /* Handle gl_SkipComponents. */
975 if (this->skip_components) {
976 info->Buffers[buffer].Stride += this->skip_components;
977 size = this->skip_components;
978 goto store_varying;
979 }
980
981 if (this->next_buffer_separator) {
982 size = 0;
983 goto store_varying;
984 }
985
986 if (has_xfb_qualifiers) {
987 xfb_offset = this->offset / 4;
988 } else {
989 xfb_offset = info->Buffers[buffer].Stride;
990 }
991 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
992
993 {
994 unsigned location = this->location;
995 unsigned location_frac = this->location_frac;
996 unsigned num_components = this->num_components();
997 while (num_components > 0) {
998 unsigned output_size = MIN2(num_components, 4 - location_frac);
999 assert((info->NumOutputs == 0 && max_outputs == 0) ||
1000 info->NumOutputs < max_outputs);
1001
1002 /* From the ARB_enhanced_layouts spec:
1003 *
1004 * "If such a block member or variable is not written during a shader
1005 * invocation, the buffer contents at the assigned offset will be
1006 * undefined. Even if there are no static writes to a variable or
1007 * member that is assigned a transform feedback offset, the space is
1008 * still allocated in the buffer and still affects the stride."
1009 */
1010 if (this->is_varying_written()) {
1011 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
1012 info->Outputs[info->NumOutputs].OutputRegister = location;
1013 info->Outputs[info->NumOutputs].NumComponents = output_size;
1014 info->Outputs[info->NumOutputs].StreamId = stream_id;
1015 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
1016 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
1017 ++info->NumOutputs;
1018 }
1019 info->Buffers[buffer].Stream = this->stream_id;
1020 xfb_offset += output_size;
1021
1022 num_components -= output_size;
1023 location++;
1024 location_frac = 0;
1025 }
1026 }
1027
1028 if (explicit_stride && explicit_stride[buffer]) {
1029 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
1030 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1031                      "multiple of 8 as it's applied to a type that is or "
1032 "contains a double.",
1033 info->Buffers[buffer].Stride * 4);
1034 return false;
1035 }
1036
1037 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
1038 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
1039 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1040 "buffer (%d)", xfb_offset * 4,
1041 info->Buffers[buffer].Stride * 4, buffer);
1042 return false;
1043 }
1044 } else {
1045 info->Buffers[buffer].Stride = xfb_offset;
1046 }
1047
1048 /* From GL_EXT_transform_feedback:
1049 * A program will fail to link if:
1050 *
1051 * * the total number of components to capture is greater than
1052 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1053 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
1054 *
1055 * From GL_ARB_enhanced_layouts:
1056 *
1057 * "The resulting stride (implicit or explicit) must be less than or
1058 * equal to the implementation-dependent constant
1059 * gl_MaxTransformFeedbackInterleavedComponents."
1060 */
1061 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1062 has_xfb_qualifiers) &&
1063 info->Buffers[buffer].Stride >
1064 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1065 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1066 "limit has been exceeded.");
1067 return false;
1068 }
1069
1070 store_varying:
1071 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1072 this->orig_name);
1073 info->Varyings[info->NumVarying].Type = this->type;
1074 info->Varyings[info->NumVarying].Size = size;
1075 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1076 info->NumVarying++;
1077 info->Buffers[buffer].NumVaryings++;
1078
1079 return true;
1080 }
1081
1082
1083 const tfeedback_candidate *
1084 tfeedback_decl::find_candidate(gl_shader_program *prog,
1085 hash_table *tfeedback_candidates)
1086 {
1087 const char *name = this->var_name;
1088 switch (this->lowered_builtin_array_variable) {
1089 case none:
1090 name = this->var_name;
1091 break;
1092 case clip_distance:
1093 name = "gl_ClipDistanceMESA";
1094 break;
1095 case cull_distance:
1096 name = "gl_CullDistanceMESA";
1097 break;
1098 case tess_level_outer:
1099 name = "gl_TessLevelOuterMESA";
1100 break;
1101 case tess_level_inner:
1102 name = "gl_TessLevelInnerMESA";
1103 break;
1104 }
1105 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1106
1107 this->matched_candidate = entry ?
1108 (const tfeedback_candidate *) entry->data : NULL;
1109
1110 if (!this->matched_candidate) {
1111 /* From GL_EXT_transform_feedback:
1112 * A program will fail to link if:
1113 *
1114 * * any variable name specified in the <varyings> array is not
1115 * declared as an output in the geometry shader (if present) or
1116 * the vertex shader (if no geometry shader is present);
1117 */
1118 linker_error(prog, "Transform feedback varying %s undeclared.",
1119 this->orig_name);
1120 }
1121
1122 return this->matched_candidate;
1123 }
1124
1125
1126 /**
1127 * Parse all the transform feedback declarations that were passed to
1128 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1129 *
1130 * If an error occurs, the error is reported through linker_error() and false
1131 * is returned.
1132 */
1133 static bool
1134 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1135 const void *mem_ctx, unsigned num_names,
1136 char **varying_names, tfeedback_decl *decls)
1137 {
1138 for (unsigned i = 0; i < num_names; ++i) {
1139 decls[i].init(ctx, mem_ctx, varying_names[i]);
1140
1141 if (!decls[i].is_varying())
1142 continue;
1143
1144 /* From GL_EXT_transform_feedback:
1145 * A program will fail to link if:
1146 *
1147 * * any two entries in the <varyings> array specify the same varying
1148 * variable;
1149 *
1150 * We interpret this to mean "any two entries in the <varyings> array
1151 * specify the same varying variable and array index", since transform
1152 * feedback of arrays would be useless otherwise.
1153 */
1154 for (unsigned j = 0; j < i; ++j) {
1155 if (!decls[j].is_varying())
1156 continue;
1157
1158 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1159 linker_error(prog, "Transform feedback varying %s specified "
1160 "more than once.", varying_names[i]);
1161 return false;
1162 }
1163 }
1164 }
1165 return true;
1166 }
1167
1168
1169 static int
1170 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1171 {
1172 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1173 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1174
1175 if (x->get_buffer() != y->get_buffer())
1176 return x->get_buffer() - y->get_buffer();
1177 return x->get_offset() - y->get_offset();
1178 }
1179
1180 /**
1181 * Store transform feedback location assignments into
1182 * prog->sh.LinkedTransformFeedback based on the data stored in
1183 * tfeedback_decls.
1184 *
1185 * If an error occurs, the error is reported through linker_error() and false
1186 * is returned.
1187 */
1188 static bool
1189 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1190 unsigned num_tfeedback_decls,
1191 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1192 {
1193 if (!prog->last_vert_prog)
1194 return true;
1195
1196 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1197 * tracking the number of buffers doesn't overflow.
1198 */
1199 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1200
1201 bool separate_attribs_mode =
1202 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1203
1204 struct gl_program *xfb_prog = prog->last_vert_prog;
1205 xfb_prog->sh.LinkedTransformFeedback =
1206 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1207
1208    /* The xfb_offset qualifier does not have to be used in increasing order;
1209     * however, some drivers expect to receive the list of transform feedback
1210     * declarations in order, so sort it now for convenience.
1211 */
1212 if (has_xfb_qualifiers)
1213 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1214 cmp_xfb_offset);
1215
1216 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1217 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1218 num_tfeedback_decls);
1219
1220 unsigned num_outputs = 0;
1221 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1222 if (tfeedback_decls[i].is_varying_written())
1223 num_outputs += tfeedback_decls[i].get_num_outputs();
1224 }
1225
1226 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1227 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1228 num_outputs);
1229
1230 unsigned num_buffers = 0;
1231 unsigned buffers = 0;
1232
1233 if (!has_xfb_qualifiers && separate_attribs_mode) {
1234 /* GL_SEPARATE_ATTRIBS */
1235 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1236 if (!tfeedback_decls[i].store(ctx, prog,
1237 xfb_prog->sh.LinkedTransformFeedback,
1238 num_buffers, num_buffers, num_outputs,
1239 NULL, has_xfb_qualifiers))
1240 return false;
1241
1242 buffers |= 1 << num_buffers;
1243 num_buffers++;
1244 }
1245 }
1246 else {
1247       /* GL_INTERLEAVED_ATTRIBS */
1248 int buffer_stream_id = -1;
1249 unsigned buffer =
1250 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1251 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1252
1253 /* Apply any xfb_stride global qualifiers */
1254 if (has_xfb_qualifiers) {
1255 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1256 if (prog->TransformFeedback.BufferStride[j]) {
1257 buffers |= 1 << j;
1258 explicit_stride[j] = true;
1259 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1260 prog->TransformFeedback.BufferStride[j] / 4;
1261 }
1262 }
1263 }
1264
1265 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1266 if (has_xfb_qualifiers &&
1267 buffer != tfeedback_decls[i].get_buffer()) {
1268 /* we have moved to the next buffer so reset stream id */
1269 buffer_stream_id = -1;
1270 num_buffers++;
1271 }
1272
1273 if (tfeedback_decls[i].is_next_buffer_separator()) {
1274 if (!tfeedback_decls[i].store(ctx, prog,
1275 xfb_prog->sh.LinkedTransformFeedback,
1276 buffer, num_buffers, num_outputs,
1277 explicit_stride, has_xfb_qualifiers))
1278 return false;
1279 num_buffers++;
1280 buffer_stream_id = -1;
1281 continue;
1282 } else if (tfeedback_decls[i].is_varying()) {
1283 if (buffer_stream_id == -1) {
1284 /* First varying writing to this buffer: remember its stream */
1285 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1286 } else if (buffer_stream_id !=
1287 (int) tfeedback_decls[i].get_stream_id()) {
1288 /* Varying writes to the same buffer from a different stream */
1289 linker_error(prog,
1290 "Transform feedback can't capture varyings belonging "
1291 "to different vertex streams in a single buffer. "
1292 "Varying %s writes to buffer from stream %u, other "
1293 "varyings in the same buffer write from stream %u.",
1294 tfeedback_decls[i].name(),
1295 tfeedback_decls[i].get_stream_id(),
1296 buffer_stream_id);
1297 return false;
1298 }
1299 }
1300
1301 if (has_xfb_qualifiers) {
1302 buffer = tfeedback_decls[i].get_buffer();
1303 } else {
1304 buffer = num_buffers;
1305 }
1306 buffers |= 1 << buffer;
1307
1308 if (!tfeedback_decls[i].store(ctx, prog,
1309 xfb_prog->sh.LinkedTransformFeedback,
1310 buffer, num_buffers, num_outputs,
1311 explicit_stride, has_xfb_qualifiers))
1312 return false;
1313 }
1314 }
1315
1316 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1317
1318 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1319 return true;
1320 }
1321
1322 namespace {
1323
1324 /**
1325 * Data structure recording the relationship between outputs of one shader
1326 * stage (the "producer") and inputs of another (the "consumer").
1327 */
1328 class varying_matches
1329 {
1330 public:
1331 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1332 bool enhanced_layouts_enabled,
1333 gl_shader_stage producer_stage,
1334 gl_shader_stage consumer_stage);
1335 ~varying_matches();
1336 void record(ir_variable *producer_var, ir_variable *consumer_var);
1337 unsigned assign_locations(struct gl_shader_program *prog,
1338 uint8_t *components,
1339 uint64_t reserved_slots);
1340 void store_locations() const;
1341
1342 private:
1343 bool is_varying_packing_safe(const glsl_type *type,
1344 const ir_variable *var);
1345
1346 /**
1347 * If true, this driver disables varying packing, so all varyings need to
1348 * be aligned on slot boundaries, and take up a number of slots equal to
1349 * their number of matrix columns times their array size.
1350 *
1351 * Packing may also be disabled because our current packing method is not
1352 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1353 * guaranteed to match across stages.
1354 */
1355 const bool disable_varying_packing;
1356
1357 /**
1358 * If true, this driver has transform feedback enabled. The transform
1359 * feedback code requires at least some packing be done even when varying
1360 * packing is disabled, fortunately where transform feedback requires
1361 * packing it's safe to override the disabled setting. See
1362 * is_varying_packing_safe().
1363 */
1364 const bool xfb_enabled;
1365
1366 const bool enhanced_layouts_enabled;
1367
1368 /**
1369 * Enum representing the order in which varyings are packed within a
1370 * packing class.
1371 *
1372 * Currently we pack vec4's first, then vec2's, then scalar values, then
1373 * vec3's. This order ensures that the only vectors that are at risk of
1374 * having to be "double parked" (split between two adjacent varying slots)
1375 * are the vec3's.
1376 */
1377 enum packing_order_enum {
1378 PACKING_ORDER_VEC4,
1379 PACKING_ORDER_VEC2,
1380 PACKING_ORDER_SCALAR,
1381 PACKING_ORDER_VEC3,
1382 };
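   /* As an illustration: with one varying of each shape in a single packing
    * class, a vec4 fills the first slot, the vec2 and the scalar share the
    * next one, and the vec3 then starts at the last component of that slot,
    * so the vec3 is the only one at risk of spilling into a following slot.
    */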
1383
1384 static unsigned compute_packing_class(const ir_variable *var);
1385 static packing_order_enum compute_packing_order(const ir_variable *var);
1386 static int match_comparator(const void *x_generic, const void *y_generic);
1387 static int xfb_comparator(const void *x_generic, const void *y_generic);
1388
1389 /**
1390 * Structure recording the relationship between a single producer output
1391 * and a single consumer input.
1392 */
1393 struct match {
1394 /**
1395 * Packing class for this varying, computed by compute_packing_class().
1396 */
1397 unsigned packing_class;
1398
1399 /**
1400 * Packing order for this varying, computed by compute_packing_order().
1401 */
1402 packing_order_enum packing_order;
1403 unsigned num_components;
1404
1405 /**
1406 * The output variable in the producer stage.
1407 */
1408 ir_variable *producer_var;
1409
1410 /**
1411 * The input variable in the consumer stage.
1412 */
1413 ir_variable *consumer_var;
1414
1415 /**
1416 * The location which has been assigned for this varying. This is
1417 * expressed in multiples of a float, with the first generic varying
1418 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1419 * value 0.
1420 */
1421 unsigned generic_location;
1422 } *matches;
1423
1424 /**
1425 * The number of elements in the \c matches array that are currently in
1426 * use.
1427 */
1428 unsigned num_matches;
1429
1430 /**
1431 * The number of elements that were set aside for the \c matches array when
1432 * it was allocated.
1433 */
1434 unsigned matches_capacity;
1435
1436 gl_shader_stage producer_stage;
1437 gl_shader_stage consumer_stage;
1438 };
1439
1440 } /* anonymous namespace */
1441
1442 varying_matches::varying_matches(bool disable_varying_packing,
1443 bool xfb_enabled,
1444 bool enhanced_layouts_enabled,
1445 gl_shader_stage producer_stage,
1446 gl_shader_stage consumer_stage)
1447 : disable_varying_packing(disable_varying_packing),
1448 xfb_enabled(xfb_enabled),
1449 enhanced_layouts_enabled(enhanced_layouts_enabled),
1450 producer_stage(producer_stage),
1451 consumer_stage(consumer_stage)
1452 {
1453 /* Note: this initial capacity is rather arbitrarily chosen to be large
1454 * enough for many cases without wasting an unreasonable amount of space.
1455 * varying_matches::record() will resize the array if there are more than
1456 * this number of varyings.
1457 */
1458 this->matches_capacity = 8;
1459 this->matches = (match *)
1460 malloc(sizeof(*this->matches) * this->matches_capacity);
1461 this->num_matches = 0;
1462 }
1463
1464
1465 varying_matches::~varying_matches()
1466 {
1467 free(this->matches);
1468 }
1469
1470
1471 /**
1472 * Packing is always safe on individual arrays, structures, and matrices. It
1473 * is also safe if the varying is only used for transform feedback.
1474 */
1475 bool
1476 varying_matches::is_varying_packing_safe(const glsl_type *type,
1477 const ir_variable *var)
1478 {
1479 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1480 consumer_stage == MESA_SHADER_TESS_CTRL ||
1481 producer_stage == MESA_SHADER_TESS_CTRL)
1482 return false;
1483
1484 return xfb_enabled && (type->is_array() || type->is_record() ||
1485 type->is_matrix() || var->data.is_xfb_only);
1486 }
1487
1488
1489 /**
1490 * Record the given producer/consumer variable pair in the list of variables
1491 * that should later be assigned locations.
1492 *
1493 * It is permissible for \c consumer_var to be NULL (this happens if a
1494 * variable is output by the producer and consumed by transform feedback, but
1495 * not consumed by the consumer).
1496 *
1497 * If \c producer_var has already been paired up with a consumer_var, or
1498 * producer_var is part of fixed pipeline functionality (and hence already has
1499 * a location assigned), this function has no effect.
1500 *
1501 * Note: as a side effect this function may change the interpolation type of
1502 * \c producer_var, but only when the change couldn't possibly affect
1503 * rendering.
1504 */
1505 void
1506 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1507 {
1508 assert(producer_var != NULL || consumer_var != NULL);
1509
1510 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1511 producer_var->data.explicit_location)) ||
1512 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1513 consumer_var->data.explicit_location))) {
1514 /* Either a location already exists for this variable (since it is part
1515 * of fixed functionality), or it has already been recorded as part of a
1516 * previous match.
1517 */
1518 return;
1519 }
1520
1521 bool needs_flat_qualifier = consumer_var == NULL &&
1522 (producer_var->type->contains_integer() ||
1523 producer_var->type->contains_double());
1524
1525 if (!disable_varying_packing &&
1526 (needs_flat_qualifier ||
1527 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1528 /* Since this varying is not being consumed by the fragment shader, its
1529       * interpolation type cannot possibly affect rendering.
1530 * Also, this variable is non-flat and is (or contains) an integer
1531 * or a double.
1532 * If the consumer stage is unknown, don't modify the interpolation
1533 * type as it could affect rendering later with separate shaders.
1534 *
1535       * lower_packed_varyings requires all integer varyings to be flat,
1536 * regardless of where they appear. We can trivially satisfy that
1537 * requirement by changing the interpolation type to flat here.
1538 */
1539 if (producer_var) {
1540 producer_var->data.centroid = false;
1541 producer_var->data.sample = false;
1542 producer_var->data.interpolation = INTERP_MODE_FLAT;
1543 }
1544
1545 if (consumer_var) {
1546 consumer_var->data.centroid = false;
1547 consumer_var->data.sample = false;
1548 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1549 }
1550 }
1551
1552 if (this->num_matches == this->matches_capacity) {
1553 this->matches_capacity *= 2;
1554 this->matches = (match *)
1555 realloc(this->matches,
1556 sizeof(*this->matches) * this->matches_capacity);
1557 }
1558
1559 /* We must use the consumer to compute the packing class because in GL4.4+
1560 * there is no guarantee interpolation qualifiers will match across stages.
1561 *
1562 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1563 *
1564 * "The type and presence of interpolation qualifiers of variables with
1565 * the same name declared in all linked shaders for the same cross-stage
1566 * interface must match, otherwise the link command will fail.
1567 *
1568 * When comparing an output from one stage to an input of a subsequent
1569 * stage, the input and output don't match if their interpolation
1570 * qualifiers (or lack thereof) are not the same."
1571 *
1572    * This text was also in at least revision 7 of the 4.40 spec but is no
1573 * longer in revision 9 and not in the 4.50 spec.
1574 */
1575 const ir_variable *const var = (consumer_var != NULL)
1576 ? consumer_var : producer_var;
1577 const gl_shader_stage stage = (consumer_var != NULL)
1578 ? consumer_stage : producer_stage;
1579 const glsl_type *type = get_varying_type(var, stage);
1580
1581 if (producer_var && consumer_var &&
1582 consumer_var->data.must_be_shader_input) {
1583 producer_var->data.must_be_shader_input = 1;
1584 }
1585
1586 this->matches[this->num_matches].packing_class
1587 = this->compute_packing_class(var);
1588 this->matches[this->num_matches].packing_order
1589 = this->compute_packing_order(var);
1590 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1591 var->data.must_be_shader_input) {
1592 unsigned slots = type->count_attribute_slots(false);
1593 this->matches[this->num_matches].num_components = slots * 4;
1594 } else {
1595 this->matches[this->num_matches].num_components
1596 = type->component_slots();
1597 }
1598
1599 this->matches[this->num_matches].producer_var = producer_var;
1600 this->matches[this->num_matches].consumer_var = consumer_var;
1601 this->num_matches++;
1602 if (producer_var)
1603 producer_var->data.is_unmatched_generic_inout = 0;
1604 if (consumer_var)
1605 consumer_var->data.is_unmatched_generic_inout = 0;
1606 }
1607
1608
1609 /**
1610 * Choose locations for all of the variable matches that were previously
1611 * passed to varying_matches::record().
1612 */
1613 unsigned
1614 varying_matches::assign_locations(struct gl_shader_program *prog,
1615 uint8_t *components,
1616 uint64_t reserved_slots)
1617 {
1618 /* If packing has been disabled then we cannot safely sort the varyings by
1619 * class as it may mean we are using a version of OpenGL where
1620 * interpolation qualifiers are not guaranteed to be matching across
1621 * shaders, sorting in this case could result in mismatching shader
1622 * interfaces.
1623 * When packing is disabled the sort orders varyings used by transform
1624 * feedback first, but also depends on *undefined behaviour* of qsort to
1625 * reverse the order of the varyings. See: xfb_comparator().
1626 */
1627 if (!this->disable_varying_packing) {
1628 /* Sort varying matches into an order that makes them easy to pack. */
1629 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1630 &varying_matches::match_comparator);
1631 } else {
1632 /* Only sort varyings that are only used by transform feedback. */
1633 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1634 &varying_matches::xfb_comparator);
1635 }
1636
1637 unsigned generic_location = 0;
1638 unsigned generic_patch_location = MAX_VARYING*4;
1639 bool previous_var_xfb_only = false;
1640
1641 for (unsigned i = 0; i < this->num_matches; i++) {
1642 unsigned *location = &generic_location;
1643
1644 const ir_variable *var;
1645 const glsl_type *type;
1646 bool is_vertex_input = false;
1647 if (matches[i].consumer_var) {
1648 var = matches[i].consumer_var;
1649 type = get_varying_type(var, consumer_stage);
1650 if (consumer_stage == MESA_SHADER_VERTEX)
1651 is_vertex_input = true;
1652 } else {
1653 var = matches[i].producer_var;
1654 type = get_varying_type(var, producer_stage);
1655 }
1656
1657 if (var->data.patch)
1658 location = &generic_patch_location;
1659
1660 /* Advance to the next slot if this varying has a different packing
1661 * class than the previous one, and we're not already on a slot
1662 * boundary.
1663 *
1664 * Also advance to the next slot if packing is disabled. This makes sure
1665 * we don't assign varyings to the same location, which is otherwise
1666 * possible because we still pack individual arrays, records and matrices
1667 * even when packing is disabled. Note we don't advance to the next slot
1668 * if we can pack varyings together that are only used for transform
1669 * feedback.
1670 */
1671 if (var->data.must_be_shader_input ||
1672 (this->disable_varying_packing &&
1673 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1674 (i > 0 && this->matches[i - 1].packing_class
1675 != this->matches[i].packing_class )) {
1676 *location = ALIGN(*location, 4);
1677 }
1678
1679 previous_var_xfb_only = var->data.is_xfb_only;
1680
1681 /* The number of components taken up by this variable. For vertex shader
1682 * inputs, we use the number of slots * 4, as they have different
1683 * counting rules.
1684 */
1685 unsigned num_components = is_vertex_input ?
1686 type->count_attribute_slots(is_vertex_input) * 4 :
1687 this->matches[i].num_components;
1688
1689 /* The last slot for this variable, inclusive. */
1690 unsigned slot_end = *location + num_components - 1;
1691
1692 /* FIXME: We could be smarter in the code below and loop back over
1693 * locations that we skipped because the varying could not be packed
1694 * between varyings with explicit locations. For now just let the user
1695 * hit the linking error if we run out of room, and suggest they use
1696 * explicit locations.
1697 */
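/* Worked example (illustrative values only): with *location == 4 and
 * num_components == 8, slot_end is 11, so the loop below tests slots 1
 * and 2 against reserved_slots; if either bit is set we realign to the
 * next slot boundary and retry.
 */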
1698 while (slot_end < MAX_VARYING * 4u) {
1699 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1700 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1701
1702 assert(slots > 0);
1703 if (reserved_slots & slot_mask) {
1704 *location = ALIGN(*location + 1, 4);
1705 slot_end = *location + num_components - 1;
1706 continue;
1707 }
1708
1709 break;
1710 }
1711
1712 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1713 linker_error(prog, "insufficient contiguous locations available for "
1714 "%s it is possible an array or struct could not be "
1715 "packed between varyings with explicit locations. Try "
1716 "using an explicit location for arrays and structs.",
1717 var->name);
1718 }
1719
1720 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1721 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1722 components[j] = 4;
1723 components[slot_end / 4u] = (slot_end & 3) + 1;
1724 }
1725
1726 this->matches[i].generic_location = *location;
1727
1728 *location = slot_end + 1;
1729 }
1730
1731 return (generic_location + 3) / 4;
1732 }
1733
1734
1735 /**
1736 * Update the producer and consumer shaders to reflect the location
1737 * assignments that were made by varying_matches::assign_locations().
1738 */
1739 void
1740 varying_matches::store_locations() const
1741 {
1742 /* Check if the location needs to be packed with lower_packed_varyings() or if
1743 * we can just use ARB_enhanced_layouts packing.
1744 */
1745 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1746 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1747
1748 for (unsigned i = 0; i < this->num_matches; i++) {
1749 ir_variable *producer_var = this->matches[i].producer_var;
1750 ir_variable *consumer_var = this->matches[i].consumer_var;
1751 unsigned generic_location = this->matches[i].generic_location;
1752 unsigned slot = generic_location / 4;
1753 unsigned offset = generic_location % 4;
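/* E.g. (illustrative): generic_location 9 maps to slot 2
 * (VARYING_SLOT_VAR2) with component offset 1.
 */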
1754
1755 if (producer_var) {
1756 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1757 producer_var->data.location_frac = offset;
1758 }
1759
1760 if (consumer_var) {
1761 assert(consumer_var->data.location == -1);
1762 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1763 consumer_var->data.location_frac = offset;
1764 }
1765
1766 /* Find locations suitable for native packing via
1767 * ARB_enhanced_layouts.
1768 */
1769 if (producer_var && consumer_var) {
1770 if (enhanced_layouts_enabled) {
1771 const glsl_type *type =
1772 get_varying_type(producer_var, producer_stage);
1773 if (type->is_array() || type->is_matrix() || type->is_record() ||
1774 type->is_double()) {
1775 unsigned comp_slots = type->component_slots() + offset;
1776 unsigned slots = comp_slots / 4;
1777 if (comp_slots % 4)
1778 slots += 1;
1779
1780 for (unsigned j = 0; j < slots; j++) {
1781 pack_loc[slot + j] = true;
1782 }
1783 } else if (offset + type->vector_elements > 4) {
1784 pack_loc[slot] = true;
1785 pack_loc[slot + 1] = true;
1786 } else {
1787 loc_type[slot][offset] = type;
1788 }
1789 }
1790 }
1791 }
1792
1793 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1794 * suitable.
1795 */
1796 if (enhanced_layouts_enabled) {
1797 for (unsigned i = 0; i < this->num_matches; i++) {
1798 ir_variable *producer_var = this->matches[i].producer_var;
1799 ir_variable *consumer_var = this->matches[i].consumer_var;
1800 unsigned generic_location = this->matches[i].generic_location;
1801 unsigned slot = generic_location / 4;
1802
1803 if (pack_loc[slot] || !producer_var || !consumer_var)
1804 continue;
1805
1806 const glsl_type *type =
1807 get_varying_type(producer_var, producer_stage);
1808 bool type_match = true;
1809 for (unsigned j = 0; j < 4; j++) {
1810 if (loc_type[slot][j]) {
1811 if (type->base_type != loc_type[slot][j]->base_type)
1812 type_match = false;
1813 }
1814 }
1815
1816 if (type_match) {
1817 producer_var->data.explicit_location = 1;
1818 consumer_var->data.explicit_location = 1;
1819 producer_var->data.explicit_component = 1;
1820 consumer_var->data.explicit_component = 1;
1821 }
1822 }
1823 }
1824 }
1825
1826
1827 /**
1828 * Compute the "packing class" of the given varying. This is an unsigned
1829 * integer with the property that two variables in the same packing class can
1830 * be safely packed into the same vec4.
1831 */
1832 unsigned
1833 varying_matches::compute_packing_class(const ir_variable *var)
1834 {
1835 /* Without help from the back-end, there is no way to pack together
1836 * variables with different interpolation types, because
1837 * lower_packed_varyings must choose exactly one interpolation type for
1838 * each packed varying it creates.
1839 *
1840 * However, we can safely pack together floats, ints, and uints, because:
1841 *
1842 * - varyings of base type "int" and "uint" must use the "flat"
1843 * interpolation type, which can only occur in GLSL 1.30 and above.
1844 *
1845 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1846 * can store flat floats as ints without losing any information (using
1847 * the ir_unop_bitcast_* opcodes).
1848 *
1849 * Therefore, the packing class depends only on the interpolation and auxiliary qualifiers, not on the base type.
1850 */
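/* Illustrative example (exact values depend on the INTERP_MODE_* enum): a
 * centroid-qualified smooth float varying gets packing_class
 * 1 * 8 + INTERP_MODE_SMOOTH, while a flat non-centroid varying gets plain
 * INTERP_MODE_FLAT, so the two can never share a vec4.
 */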
1851 unsigned packing_class = var->data.centroid | (var->data.sample << 1) |
1852 (var->data.patch << 2) |
1853 (var->data.must_be_shader_input << 3);
1854 packing_class *= 8;
1855 packing_class += var->is_interpolation_flat()
1856 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
1857 return packing_class;
1858 }
1859
1860
1861 /**
1862 * Compute the "packing order" of the given varying. This is a sort key we
1863 * use to determine when to attempt to pack the given varying relative to
1864 * other varyings in the same packing class.
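*
* For example (illustrative): a "vec3 v[2]" varying strips down to vec3, so
* component_slots() % 4 == 3 and it is ordered as PACKING_ORDER_VEC3.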
1865 */
1866 varying_matches::packing_order_enum
1867 varying_matches::compute_packing_order(const ir_variable *var)
1868 {
1869 const glsl_type *element_type = var->type;
1870
1871 while (element_type->is_array()) {
1872 element_type = element_type->fields.array;
1873 }
1874
1875 switch (element_type->component_slots() % 4) {
1876 case 1: return PACKING_ORDER_SCALAR;
1877 case 2: return PACKING_ORDER_VEC2;
1878 case 3: return PACKING_ORDER_VEC3;
1879 case 0: return PACKING_ORDER_VEC4;
1880 default:
1881 assert(!"Unexpected value of vector_elements");
1882 return PACKING_ORDER_VEC4;
1883 }
1884 }
1885
1886
1887 /**
1888 * Comparison function passed to qsort() to sort varyings by packing_class and
1889 * then by packing_order.
1890 */
1891 int
1892 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
1893 {
1894 const match *x = (const match *) x_generic;
1895 const match *y = (const match *) y_generic;
1896
1897 if (x->packing_class != y->packing_class)
1898 return x->packing_class - y->packing_class;
1899 return x->packing_order - y->packing_order;
1900 }
1901
1902
1903 /**
1904 * Comparison function passed to qsort() to sort varyings used only by
1905 * transform feedback when packing of other varyings is disabled.
1906 */
1907 int
1908 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
1909 {
1910 const match *x = (const match *) x_generic;
1911
1912 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
1913 return match_comparator(x_generic, y_generic);
1914
1915 /* FIXME: When the comparator returns 0 it means the elements being
1916 * compared are equivalent. However, the qsort documentation says:
1917 *
1918 * "The order of equivalent elements is undefined."
1919 *
1920 * In practice the sort ends up reversing the order of the varyings, which
1921 * means locations are also assigned in this reversed order, and that
1922 * happens to be what we want. This is also what happens in
1923 * varying_matches::match_comparator().
1924 */
1925 return 0;
1926 }
1927
1928
1929 /**
1930 * Is the given variable a varying variable to be counted against the
1931 * limit in ctx->Const.MaxVarying?
1932 * This includes variables such as texcoords, colors and generic
1933 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
1934 */
1935 static bool
1936 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
1937 {
1938 /* Only fragment shaders will take a varying variable as an input */
1939 if (stage == MESA_SHADER_FRAGMENT &&
1940 var->data.mode == ir_var_shader_in) {
1941 switch (var->data.location) {
1942 case VARYING_SLOT_POS:
1943 case VARYING_SLOT_FACE:
1944 case VARYING_SLOT_PNTC:
1945 return false;
1946 default:
1947 return true;
1948 }
1949 }
1950 return false;
1951 }
1952
1953
1954 /**
1955 * Visitor class that generates tfeedback_candidate structs describing all
1956 * possible targets of transform feedback.
1957 *
1958 * tfeedback_candidate structs are stored in the hash table
1959 * tfeedback_candidates, which is passed to the constructor. This hash table
1960 * maps varying names to instances of the tfeedback_candidate struct.
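*
* For example (illustrative, hypothetical shader code): processing a producer
* output declared as "struct S { vec4 a; float b; }; out S s;" records
* candidates keyed "s.a" (offset 0 floats) and "s.b" (offset 4 floats).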
1961 */
1962 class tfeedback_candidate_generator : public program_resource_visitor
1963 {
1964 public:
1965 tfeedback_candidate_generator(void *mem_ctx,
1966 hash_table *tfeedback_candidates)
1967 : mem_ctx(mem_ctx),
1968 tfeedback_candidates(tfeedback_candidates),
1969 toplevel_var(NULL),
1970 varying_floats(0)
1971 {
1972 }
1973
1974 void process(ir_variable *var)
1975 {
1976 /* All named varying interface blocks should be flattened by now */
1977 assert(!var->is_interface_instance());
1978
1979 this->toplevel_var = var;
1980 this->varying_floats = 0;
1981 program_resource_visitor::process(var, false);
1982 }
1983
1984 private:
1985 virtual void visit_field(const glsl_type *type, const char *name,
1986 bool /* row_major */,
1987 const glsl_type * /* record_type */,
1988 const enum glsl_interface_packing,
1989 bool /* last_field */)
1990 {
1991 assert(!type->without_array()->is_record());
1992 assert(!type->without_array()->is_interface());
1993
1994 tfeedback_candidate *candidate
1995 = rzalloc(this->mem_ctx, tfeedback_candidate);
1996 candidate->toplevel_var = this->toplevel_var;
1997 candidate->type = type;
1998 candidate->offset = this->varying_floats;
1999 _mesa_hash_table_insert(this->tfeedback_candidates,
2000 ralloc_strdup(this->mem_ctx, name),
2001 candidate);
2002 this->varying_floats += type->component_slots();
2003 }
2004
2005 /**
2006 * Memory context used to allocate hash table keys and values.
2007 */
2008 void * const mem_ctx;
2009
2010 /**
2011 * Hash table in which tfeedback_candidate objects should be stored.
2012 */
2013 hash_table * const tfeedback_candidates;
2014
2015 /**
2016 * Pointer to the toplevel variable that is being traversed.
2017 */
2018 ir_variable *toplevel_var;
2019
2020 /**
2021 * Total number of varying floats that have been visited so far. This is
2022 * used to determine the offset to each varying within the toplevel
2023 * variable.
2024 */
2025 unsigned varying_floats;
2026 };
2027
2028
2029 namespace linker {
2030
2031 void
2032 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
2033 hash_table *consumer_inputs,
2034 hash_table *consumer_interface_inputs,
2035 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2036 {
2037 memset(consumer_inputs_with_locations,
2038 0,
2039 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2040
2041 foreach_in_list(ir_instruction, node, ir) {
2042 ir_variable *const input_var = node->as_variable();
2043
2044 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2045 /* All interface blocks should have been lowered by this point */
2046 assert(!input_var->type->is_interface());
2047
2048 if (input_var->data.explicit_location) {
2049 /* assign_varying_locations only cares about finding the
2050 * ir_variable at the start of a contiguous location block.
2051 *
2052 * - For !producer, consumer_inputs_with_locations isn't used.
2053 *
2054 * - For !consumer, consumer_inputs_with_locations is empty.
2055 *
2056 * For consumer && producer, if you were trying to set some
2057 * ir_variable to the middle of a location block on the other side
2058 * of producer/consumer, cross_validate_outputs_to_inputs() should
2059 * be link-erroring due to either type mismatch or location
2060 * overlaps. If the variables do match up, then they've got a
2061 * matching data.location and you only looked at
2062 * consumer_inputs_with_locations[var->data.location], not any
2063 * following entries for the array/structure.
2064 */
2065 consumer_inputs_with_locations[input_var->data.location] =
2066 input_var;
2067 } else if (input_var->get_interface_type() != NULL) {
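/* Keyed by "InterfaceName.member". E.g. (hypothetical shader code) a
 * consumer input from the block
 *
 *    in Lighting { vec3 normal; };
 *
 * is stored under the key "Lighting.normal".
 */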
2068 char *const iface_field_name =
2069 ralloc_asprintf(mem_ctx, "%s.%s",
2070 input_var->get_interface_type()->without_array()->name,
2071 input_var->name);
2072 _mesa_hash_table_insert(consumer_interface_inputs,
2073 iface_field_name, input_var);
2074 } else {
2075 _mesa_hash_table_insert(consumer_inputs,
2076 ralloc_strdup(mem_ctx, input_var->name),
2077 input_var);
2078 }
2079 }
2080 }
2081 }
2082
2083 /**
2084 * Find a variable from the consumer that "matches" the specified variable.
2085 *
2086 * This function only finds inputs with names that match. There is no
2087 * validation (here) that the types, etc. are compatible.
2088 */
2089 ir_variable *
2090 get_matching_input(void *mem_ctx,
2091 const ir_variable *output_var,
2092 hash_table *consumer_inputs,
2093 hash_table *consumer_interface_inputs,
2094 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2095 {
2096 ir_variable *input_var;
2097
2098 if (output_var->data.explicit_location) {
2099 input_var = consumer_inputs_with_locations[output_var->data.location];
2100 } else if (output_var->get_interface_type() != NULL) {
2101 char *const iface_field_name =
2102 ralloc_asprintf(mem_ctx, "%s.%s",
2103 output_var->get_interface_type()->without_array()->name,
2104 output_var->name);
2105 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2106 input_var = entry ? (ir_variable *) entry->data : NULL;
2107 } else {
2108 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2109 input_var = entry ? (ir_variable *) entry->data : NULL;
2110 }
2111
2112 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2113 ? NULL : input_var;
2114 }
2115
2116 }
2117
2118 static int
2119 io_variable_cmp(const void *_a, const void *_b)
2120 {
2121 const ir_variable *const a = *(const ir_variable **) _a;
2122 const ir_variable *const b = *(const ir_variable **) _b;
2123
2124 if (a->data.explicit_location && b->data.explicit_location)
2125 return b->data.location - a->data.location;
2126
2127 if (a->data.explicit_location && !b->data.explicit_location)
2128 return 1;
2129
2130 if (!a->data.explicit_location && b->data.explicit_location)
2131 return -1;
2132
2133 return -strcmp(a->name, b->name);
2134 }
2135
2136 /**
2137 * Sort the shader IO variables into canonical order
2138 */
2139 static void
2140 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2141 {
2142 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2143 unsigned num_variables = 0;
2144
2145 foreach_in_list(ir_instruction, node, ir) {
2146 ir_variable *const var = node->as_variable();
2147
2148 if (var == NULL || var->data.mode != io_mode)
2149 continue;
2150
2151 /* If we have already encountered more I/O variables than could
2152 * successfully link, bail.
2153 */
2154 if (num_variables == ARRAY_SIZE(var_table))
2155 return;
2156
2157 var_table[num_variables++] = var;
2158 }
2159
2160 if (num_variables == 0)
2161 return;
2162
2163 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2164 * we're going to push the variables onto the IR list as a stack, so we
2165 * want the last variable (in canonical order) to be first in the list.
2166 */
2167 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2168
2169 /* Remove the variable from its current location in the IR, and put it at
2170 * the front.
2171 */
2172 for (unsigned i = 0; i < num_variables; i++) {
2173 var_table[i]->remove();
2174 ir->push_head(var_table[i]);
2175 }
2176 }
2177
2178 /**
2179 * Generate a bitfield map of the explicit locations for shader varyings.
2180 *
2181 * Note: For tessellation shaders we are sitting right on the limits of the
2182 * 64-bit map. Per-vertex and per-patch both have separate location domains
2183 * with a max of MAX_VARYING.
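*
* For example (illustrative, hypothetical shader code): a producer output
*
*    layout(location = 3) out vec4 colors[2];
*
* occupies VARYING_SLOT_VAR3 and VARYING_SLOT_VAR4, so bits 3 and 4 are set
* in the returned mask.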
2184 */
2185 static uint64_t
2186 reserved_varying_slot(struct gl_linked_shader *stage,
2187 ir_variable_mode io_mode)
2188 {
2189 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2190 /* Avoid an overflow of the returned value */
2191 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2192
2193 uint64_t slots = 0;
2194 int var_slot;
2195
2196 if (!stage)
2197 return slots;
2198
2199 foreach_in_list(ir_instruction, node, stage->ir) {
2200 ir_variable *const var = node->as_variable();
2201
2202 if (var == NULL || var->data.mode != io_mode ||
2203 !var->data.explicit_location ||
2204 var->data.location < VARYING_SLOT_VAR0)
2205 continue;
2206
2207 var_slot = var->data.location - VARYING_SLOT_VAR0;
2208
2209 unsigned num_elements = get_varying_type(var, stage->Stage)
2210 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2211 stage->Stage == MESA_SHADER_VERTEX);
2212 for (unsigned i = 0; i < num_elements; i++) {
2213 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2214 slots |= UINT64_C(1) << var_slot;
2215 var_slot += 1;
2216 }
2217 }
2218
2219 return slots;
2220 }
2221
2222
2223 /**
2224 * Assign locations for all variables that are produced in one pipeline stage
2225 * (the "producer") and consumed in the next stage (the "consumer").
2226 *
2227 * Variables produced by the producer may also be consumed by transform
2228 * feedback.
2229 *
2230 * \param num_tfeedback_decls is the number of declarations indicating
2231 * variables that may be consumed by transform feedback.
2232 *
2233 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2234 * representing the result of parsing the strings passed to
2235 * glTransformFeedbackVaryings(). assign_location() will be called for
2236 * each of these objects that matches one of the outputs of the
2237 * producer.
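*
* E.g. (illustrative API usage): after
*
*    const char *names[] = { "gl_Position", "normal_out" };
*    glTransformFeedbackVaryings(prog, 2, names, GL_INTERLEAVED_ATTRIBS);
*
* num_tfeedback_decls is 2 and tfeedback_decls holds one parsed entry per
* name ("normal_out" is a hypothetical varying name).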
2238 *
2239 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2240 * be NULL. In this case, varying locations are assigned solely based on the
2241 * requirements of transform feedback.
2242 */
2243 static bool
2244 assign_varying_locations(struct gl_context *ctx,
2245 void *mem_ctx,
2246 struct gl_shader_program *prog,
2247 gl_linked_shader *producer,
2248 gl_linked_shader *consumer,
2249 unsigned num_tfeedback_decls,
2250 tfeedback_decl *tfeedback_decls,
2251 const uint64_t reserved_slots)
2252 {
2253 /* Tessellation shaders treat inputs and outputs as shared memory and can
2254 * access inputs and outputs of other invocations.
2255 * Therefore, they can't be lowered to temps easily (and definitely not
2256 * efficiently).
2257 */
2258 bool unpackable_tess =
2259 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2260 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2261 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2262
2263 /* Transform feedback code assumes varying arrays are packed, so if the
2264 * driver has disabled varying packing, make sure to at least enable
2265 * packing required by transform feedback.
2266 */
2267 bool xfb_enabled =
2268 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2269
2270 /* Disable packing on outward facing interfaces for SSO because in ES we
2271 * need to retain the unpacked varying information for draw time
2272 * validation.
2273 *
2274 * Packing is still enabled on individual arrays, structs, and matrices as
2275 * these are required by the transform feedback code and it is still safe
2276 * to do so. We also enable packing when a varying is only used for
2277 * transform feedback and its not a SSO.
2278 */
2279 bool disable_varying_packing =
2280 ctx->Const.DisableVaryingPacking || unpackable_tess;
2281 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2282 disable_varying_packing = true;
2283
2284 varying_matches matches(disable_varying_packing, xfb_enabled,
2285 ctx->Extensions.ARB_enhanced_layouts,
2286 producer ? producer->Stage : MESA_SHADER_NONE,
2287 consumer ? consumer->Stage : MESA_SHADER_NONE);
2288 hash_table *tfeedback_candidates =
2289 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2290 _mesa_key_string_equal);
2291 hash_table *consumer_inputs =
2292 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2293 _mesa_key_string_equal);
2294 hash_table *consumer_interface_inputs =
2295 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2296 _mesa_key_string_equal);
2297 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2298 NULL,
2299 };
2300
2301 unsigned consumer_vertices = 0;
2302 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2303 consumer_vertices = prog->Geom.VerticesIn;
2304
2305 /* Operate in a total of four passes.
2306 *
2307 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2308 * that inputs / outputs of separable shaders will be assigned
2309 * predictable locations regardless of the order in which declarations
2310 * appeared in the shader source.
2311 *
2312 * 2. Assign locations for any matching inputs and outputs.
2313 *
2314 * 3. Mark output variables in the producer that do not have locations as
2315 * not being outputs. This lets the optimizer eliminate them.
2316 *
2317 * 4. Mark input variables in the consumer that do not have locations as
2318 * not being inputs. This lets the optimizer eliminate them.
2319 */
2320 if (consumer)
2321 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2322
2323 if (producer)
2324 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2325
2326 if (consumer)
2327 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2328 consumer_inputs,
2329 consumer_interface_inputs,
2330 consumer_inputs_with_locations);
2331
2332 if (producer) {
2333 foreach_in_list(ir_instruction, node, producer->ir) {
2334 ir_variable *const output_var = node->as_variable();
2335
2336 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2337 continue;
2338
2339 /* Only geometry shaders can use non-zero streams */
2340 assert(output_var->data.stream == 0 ||
2341 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2342 producer->Stage == MESA_SHADER_GEOMETRY));
2343
2344 if (num_tfeedback_decls > 0) {
2345 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2346 g.process(output_var);
2347 }
2348
2349 ir_variable *const input_var =
2350 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2351 consumer_interface_inputs,
2352 consumer_inputs_with_locations);
2353
2354 /* If a matching input variable was found, add this output (and the
2355 * input) to the set. If this is a separable program and there is no
2356 * consumer stage, add the output.
2357 *
2358 * Always add TCS outputs. They are shared by all invocations
2359 * within a patch and can be used as shared memory.
2360 */
2361 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2362 producer->Stage == MESA_SHADER_TESS_CTRL) {
2363 matches.record(output_var, input_var);
2364 }
2365
2366 /* Only stream 0 outputs can be consumed in the next stage */
2367 if (input_var && output_var->data.stream != 0) {
2368 linker_error(prog, "output %s is assigned to stream=%d but "
2369 "is linked to an input, which requires stream=0",
2370 output_var->name, output_var->data.stream);
2371 return false;
2372 }
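/* E.g. (hypothetical shader code): a geometry shader output declared as
 *
 *    layout(stream = 1) out vec4 extra_data;
 *
 * may only be captured by transform feedback; if the next stage also
 * declares "in vec4 extra_data;", the error above is reported.
 */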
2373 }
2374 } else {
2375 /* If there's no producer stage, then this must be a separable program.
2376 * For example, we may have a program that has just a fragment shader.
2377 * Later this program will be used with some arbitrary vertex (or
2378 * geometry) shader program. This means that locations must be assigned
2379 * for all the inputs.
2380 */
2381 foreach_in_list(ir_instruction, node, consumer->ir) {
2382 ir_variable *const input_var = node->as_variable();
2383
2384 if (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2385 continue;
2386
2387 matches.record(NULL, input_var);
2388 }
2389 }
2390
2391 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2392 if (!tfeedback_decls[i].is_varying())
2393 continue;
2394
2395 const tfeedback_candidate *matched_candidate
2396 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2397
2398 if (matched_candidate == NULL) {
2399 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2400 return false;
2401 }
2402
2403 /* Mark xfb varyings as always active */
2404 matched_candidate->toplevel_var->data.always_active_io = 1;
2405
2406 /* Mark any corresponding inputs as always active also. We must do this
2407 * because we have a NIR pass that lowers vectors to scalars and another
2408 * that removes unused varyings.
2409 * We don't split varyings marked as always active because there is no
2410 * point in doing so. This means we need to mark both sides of the
2411 * interface as always active; otherwise we will have a mismatch and
2412 * start removing things we shouldn't.
2413 */
2414 ir_variable *const input_var =
2415 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2416 consumer_inputs,
2417 consumer_interface_inputs,
2418 consumer_inputs_with_locations);
2419 if (input_var)
2420 input_var->data.always_active_io = 1;
2421
2422 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2423 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2424 matches.record(matched_candidate->toplevel_var, NULL);
2425 }
2426 }
2427
2428 _mesa_hash_table_destroy(consumer_inputs, NULL);
2429 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2430
2431 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2432 const unsigned slots_used = matches.assign_locations(
2433 prog, components, reserved_slots);
2434 matches.store_locations();
2435
2436 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2437 if (!tfeedback_decls[i].is_varying())
2438 continue;
2439
2440 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2441 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2442 return false;
2443 }
2444 }
2445 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2446
2447 if (consumer && producer) {
2448 foreach_in_list(ir_instruction, node, consumer->ir) {
2449 ir_variable *const var = node->as_variable();
2450
2451 if (var && var->data.mode == ir_var_shader_in &&
2452 var->data.is_unmatched_generic_inout) {
2453 if (!prog->IsES && prog->data->Version <= 120) {
2454 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2455 *
2456 * Only those varying variables used (i.e. read) in
2457 * the fragment shader executable must be written to
2458 * by the vertex shader executable; declaring
2459 * superfluous varying variables in a vertex shader is
2460 * permissible.
2461 *
2462 * We interpret this text as meaning that the VS must
2463 * write the variable for the FS to read it. See
2464 * "glsl1-varying read but not written" in piglit.
2465 */
2466 linker_error(prog, "%s shader varying %s not written "
2467 "by %s shader\n.",
2468 _mesa_shader_stage_to_string(consumer->Stage),
2469 var->name,
2470 _mesa_shader_stage_to_string(producer->Stage));
2471 } else {
2472 linker_warning(prog, "%s shader varying %s not written "
2473 "by %s shader\n.",
2474 _mesa_shader_stage_to_string(consumer->Stage),
2475 var->name,
2476 _mesa_shader_stage_to_string(producer->Stage));
2477 }
2478 }
2479 }
2480
2481 /* Now that validation is done it's safe to remove unused varyings. As
2482 * we have both a producer and a consumer it's safe to remove unused
2483 * varyings even if the program is an SSO, because the stages are being
2484 * linked together, i.e. we have a multi-stage SSO.
2485 */
2486 remove_unused_shader_inputs_and_outputs(false, producer,
2487 ir_var_shader_out);
2488 remove_unused_shader_inputs_and_outputs(false, consumer,
2489 ir_var_shader_in);
2490 }
2491
2492 if (producer) {
2493 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2494 0, producer, disable_varying_packing,
2495 xfb_enabled);
2496 }
2497
2498 if (consumer) {
2499 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2500 consumer_vertices, consumer,
2501 disable_varying_packing, xfb_enabled);
2502 }
2503
2504 return true;
2505 }
2506
2507 static bool
2508 check_against_output_limit(struct gl_context *ctx,
2509 struct gl_shader_program *prog,
2510 gl_linked_shader *producer,
2511 unsigned num_explicit_locations)
2512 {
2513 unsigned output_vectors = num_explicit_locations;
2514
2515 foreach_in_list(ir_instruction, node, producer->ir) {
2516 ir_variable *const var = node->as_variable();
2517
2518 if (var && !var->data.explicit_location &&
2519 var->data.mode == ir_var_shader_out &&
2520 var_counts_against_varying_limit(producer->Stage, var)) {
2521 /* outputs for fragment shader can't be doubles */
2522 output_vectors += var->type->count_attribute_slots(false);
2523 }
2524 }
2525
2526 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2527 unsigned max_output_components =
2528 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2529
2530 const unsigned output_components = output_vectors * 4;
2531 if (output_components > max_output_components) {
2532 if (ctx->API == API_OPENGLES2 || prog->IsES)
2533 linker_error(prog, "%s shader uses too many output vectors "
2534 "(%u > %u)\n",
2535 _mesa_shader_stage_to_string(producer->Stage),
2536 output_vectors,
2537 max_output_components / 4);
2538 else
2539 linker_error(prog, "%s shader uses too many output components "
2540 "(%u > %u)\n",
2541 _mesa_shader_stage_to_string(producer->Stage),
2542 output_components,
2543 max_output_components);
2544
2545 return false;
2546 }
2547
2548 return true;
2549 }
2550
2551 static bool
2552 check_against_input_limit(struct gl_context *ctx,
2553 struct gl_shader_program *prog,
2554 gl_linked_shader *consumer,
2555 unsigned num_explicit_locations)
2556 {
2557 unsigned input_vectors = num_explicit_locations;
2558
2559 foreach_in_list(ir_instruction, node, consumer->ir) {
2560 ir_variable *const var = node->as_variable();
2561
2562 if (var && !var->data.explicit_location &&
2563 var->data.mode == ir_var_shader_in &&
2564 var_counts_against_varying_limit(consumer->Stage, var)) {
2565 /* vertex inputs aren't counted as varyings */
2566 input_vectors += var->type->count_attribute_slots(false);
2567 }
2568 }
2569
2570 assert(consumer->Stage != MESA_SHADER_VERTEX);
2571 unsigned max_input_components =
2572 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2573
2574 const unsigned input_components = input_vectors * 4;
2575 if (input_components > max_input_components) {
2576 if (ctx->API == API_OPENGLES2 || prog->IsES)
2577 linker_error(prog, "%s shader uses too many input vectors "
2578 "(%u > %u)\n",
2579 _mesa_shader_stage_to_string(consumer->Stage),
2580 input_vectors,
2581 max_input_components / 4);
2582 else
2583 linker_error(prog, "%s shader uses too many input components "
2584 "(%u > %u)\n",
2585 _mesa_shader_stage_to_string(consumer->Stage),
2586 input_components,
2587 max_input_components);
2588
2589 return false;
2590 }
2591
2592 return true;
2593 }
2594
2595 bool
2596 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2597 struct gl_context *ctx, void *mem_ctx)
2598 {
2599 bool has_xfb_qualifiers = false;
2600 unsigned num_tfeedback_decls = 0;
2601 char **varying_names = NULL;
2602 tfeedback_decl *tfeedback_decls = NULL;
2603
2604 /* From the ARB_enhanced_layouts spec:
2605 *
2606 * "If the shader used to record output variables for transform feedback
2607 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2608 * qualifiers, the values specified by TransformFeedbackVaryings are
2609 * ignored, and the set of variables captured for transform feedback is
2610 * instead derived from the specified layout qualifiers."
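*
* E.g. (hypothetical shader code): if the last pre-rasterization stage
* declares
*
*    layout(xfb_offset = 0) out vec4 pos;
*
* any names passed to glTransformFeedbackVaryings() are ignored and the
* captured outputs come from the layout qualifiers instead.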
2611 */
2612 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2613 /* Find last stage before fragment shader */
2614 if (prog->_LinkedShaders[i]) {
2615 has_xfb_qualifiers =
2616 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2617 prog, &num_tfeedback_decls,
2618 &varying_names);
2619 break;
2620 }
2621 }
2622
2623 if (!has_xfb_qualifiers) {
2624 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2625 varying_names = prog->TransformFeedback.VaryingNames;
2626 }
2627
2628 if (num_tfeedback_decls != 0) {
2629 /* From GL_EXT_transform_feedback:
2630 * A program will fail to link if:
2631 *
2632 * * the <count> specified by TransformFeedbackVaryingsEXT is
2633 * non-zero, but the program object has no vertex or geometry
2634 * shader;
2635 */
2636 if (first >= MESA_SHADER_FRAGMENT) {
2637 linker_error(prog, "Transform feedback varyings specified, but "
2638 "no vertex, tessellation, or geometry shader is "
2639 "present.\n");
2640 return false;
2641 }
2642
2643 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2644 num_tfeedback_decls);
2645 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2646 varying_names, tfeedback_decls))
2647 return false;
2648 }
2649
2650 /* If there is no fragment shader we still need to set up transform feedback.
2651 *
2652 * For SSO we also need to assign output locations. We assign them here
2653 * because we need to do it for both single stage programs and multi stage
2654 * programs.
2655 */
2656 if (last < MESA_SHADER_FRAGMENT &&
2657 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2658 const uint64_t reserved_out_slots =
2659 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2660 if (!assign_varying_locations(ctx, mem_ctx, prog,
2661 prog->_LinkedShaders[last], NULL,
2662 num_tfeedback_decls, tfeedback_decls,
2663 reserved_out_slots))
2664 return false;
2665 }
2666
2667 if (last <= MESA_SHADER_FRAGMENT) {
2668 /* Remove unused varyings from the first/last stage unless SSO */
2669 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2670 prog->_LinkedShaders[first],
2671 ir_var_shader_in);
2672 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2673 prog->_LinkedShaders[last],
2674 ir_var_shader_out);
2675
2676 /* If the program is made up of only a single stage */
2677 if (first == last) {
2678 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2679
2680 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2681 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2682 tfeedback_decls);
2683
2684 if (prog->SeparateShader) {
2685 const uint64_t reserved_slots =
2686 reserved_varying_slot(sh, ir_var_shader_in);
2687
2688 /* Assign input locations for SSO, output locations are already
2689 * assigned.
2690 */
2691 if (!assign_varying_locations(ctx, mem_ctx, prog,
2692 NULL /* producer */,
2693 sh /* consumer */,
2694 0 /* num_tfeedback_decls */,
2695 NULL /* tfeedback_decls */,
2696 reserved_slots))
2697 return false;
2698 }
2699 } else {
2700 /* Linking the stages in the opposite order (from fragment to vertex)
2701 * ensures that inter-shader outputs written to in an earlier stage
2702 * are eliminated if they are (transitively) not used in a later
2703 * stage.
2704 */
2705 int next = last;
2706 for (int i = next - 1; i >= 0; i--) {
2707 if (prog->_LinkedShaders[i] == NULL && i != 0)
2708 continue;
2709
2710 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2711 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2712
2713 const uint64_t reserved_out_slots =
2714 reserved_varying_slot(sh_i, ir_var_shader_out);
2715 const uint64_t reserved_in_slots =
2716 reserved_varying_slot(sh_next, ir_var_shader_in);
2717
2718 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2719 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2720 tfeedback_decls);
2721
2722 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2723 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2724 tfeedback_decls,
2725 reserved_out_slots | reserved_in_slots))
2726 return false;
2727
2728 /* This must be done after all dead varyings are eliminated. */
2729 if (sh_i != NULL) {
2730 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2731 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2732 return false;
2733 }
2734 }
2735
2736 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2737 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2738 return false;
2739
2740 next = i;
2741 }
2742 }
2743 }
2744
2745 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2746 has_xfb_qualifiers))
2747 return false;
2748
2749 return true;
2750 }