glsl/linker: always validate explicit location among inputs
[mesa.git] / src / compiler / glsl / link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/errors.h"
33 #include "main/mtypes.h"
34 #include "glsl_symbol_table.h"
35 #include "glsl_parser_extras.h"
36 #include "ir_optimization.h"
37 #include "linker.h"
38 #include "link_varyings.h"
39 #include "main/macros.h"
40 #include "util/hash_table.h"
41 #include "util/u_math.h"
42 #include "program.h"
43
44
45 /**
46 * Get the varying type stripped of the outermost array if we're processing
47 * a stage whose varyings are arrays indexed by a vertex number (such as
48 * geometry shader inputs).
49 */
50 static const glsl_type *
51 get_varying_type(const ir_variable *var, gl_shader_stage stage)
52 {
53 const glsl_type *type = var->type;
54
55 if (!var->data.patch &&
56 ((var->data.mode == ir_var_shader_out &&
57 stage == MESA_SHADER_TESS_CTRL) ||
58 (var->data.mode == ir_var_shader_in &&
59 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
60 stage == MESA_SHADER_GEOMETRY)))) {
61 assert(type->is_array());
62 type = type->fields.array;
63 }
64
65 return type;
66 }
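/* Illustrative GLSL, not from this file: in a geometry shader declared with
 *   layout(triangles) in;
 *   in vec4 vs_color[];
 * the input vs_color has type vec4[3]; get_varying_type() strips the
 * per-vertex array and returns the element type vec4.
 */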
67
68 static void
69 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
70 size_t name_length, unsigned *count,
71 const char *ifc_member_name,
72 const glsl_type *ifc_member_t, char ***varying_names)
73 {
74 if (t->is_interface()) {
75 size_t new_length = name_length;
76
77 assert(ifc_member_name && ifc_member_t);
78 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
79
80 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
81 NULL, NULL, varying_names);
82 } else if (t->is_struct()) {
83 for (unsigned i = 0; i < t->length; i++) {
84 const char *field = t->fields.structure[i].name;
85 size_t new_length = name_length;
86
87 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
88
89 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
90 new_length, count, NULL, NULL,
91 varying_names);
92 }
93 } else if (t->without_array()->is_struct() ||
94 t->without_array()->is_interface() ||
95 (t->is_array() && t->fields.array->is_array())) {
96 for (unsigned i = 0; i < t->length; i++) {
97 size_t new_length = name_length;
98
99 /* Append the subscript to the current variable name */
100 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
101
102 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
103 count, ifc_member_name, ifc_member_t,
104 varying_names);
105 }
106 } else {
107 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
108 }
109 }
110
111 static bool
112 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
113 struct gl_shader_program *prog,
114 unsigned *num_tfeedback_decls,
115 char ***varying_names)
116 {
117 bool has_xfb_qualifiers = false;
118
119 /* We still need to enable transform feedback mode even if xfb_stride is
120 * only applied to a global out. Also we don't bother to propagate
121 * xfb_stride to interface block members so this will catch that case also.
122 */
123 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
124 if (prog->TransformFeedback.BufferStride[j]) {
125 has_xfb_qualifiers = true;
126 break;
127 }
128 }
129
130 foreach_in_list(ir_instruction, node, sh->ir) {
131 ir_variable *var = node->as_variable();
132 if (!var || var->data.mode != ir_var_shader_out)
133 continue;
134
135 /* From the ARB_enhanced_layouts spec:
136 *
137 * "Any shader making any static use (after preprocessing) of any of
138 * these *xfb_* qualifiers will cause the shader to be in a
139 * transform feedback capturing mode and hence responsible for
140 * describing the transform feedback setup. This mode will capture
141 * any output selected by *xfb_offset*, directly or indirectly, to
142 * a transform feedback buffer."
143 */
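/* Illustrative GLSL, not from this file: a declaration such as
 *   layout(xfb_buffer = 0, xfb_offset = 0) out vec4 pos;
 * puts the shader in transform feedback capturing mode, so pos is
 * captured even if glTransformFeedbackVaryings() is never called.
 */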
144 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
145 has_xfb_qualifiers = true;
146 }
147
148 if (var->data.explicit_xfb_offset) {
149 *num_tfeedback_decls += var->type->varying_count();
150 has_xfb_qualifiers = true;
151 }
152 }
153
154 if (*num_tfeedback_decls == 0)
155 return has_xfb_qualifiers;
156
157 unsigned i = 0;
158 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
159 foreach_in_list(ir_instruction, node, sh->ir) {
160 ir_variable *var = node->as_variable();
161 if (!var || var->data.mode != ir_var_shader_out)
162 continue;
163
164 if (var->data.explicit_xfb_offset) {
165 char *name;
166 const glsl_type *type, *member_type;
167
168 if (var->data.from_named_ifc_block) {
169 type = var->get_interface_type();
170
171 /* Find the member type before it was altered by lowering */
172 const glsl_type *type_wa = type->without_array();
173 member_type =
174 type_wa->fields.structure[type_wa->field_index(var->name)].type;
175 name = ralloc_strdup(NULL, type_wa->name);
176 } else {
177 type = var->type;
178 member_type = NULL;
179 name = ralloc_strdup(NULL, var->name);
180 }
181 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
182 var->name, member_type, varying_names);
183 ralloc_free(name);
184 }
185 }
186
187 assert(i == *num_tfeedback_decls);
188 return has_xfb_qualifiers;
189 }
190
191 /**
192 * Validate the types and qualifiers of an output from one stage against the
193 * matching input to another stage.
194 */
195 static void
196 cross_validate_types_and_qualifiers(struct gl_context *ctx,
197 struct gl_shader_program *prog,
198 const ir_variable *input,
199 const ir_variable *output,
200 gl_shader_stage consumer_stage,
201 gl_shader_stage producer_stage)
202 {
203 /* Check that the types match between stages.
204 */
205 const glsl_type *type_to_match = input->type;
206
207 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
208 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
209 consumer_stage != MESA_SHADER_FRAGMENT) ||
210 consumer_stage == MESA_SHADER_GEOMETRY;
211 if (extra_array_level) {
212 assert(type_to_match->is_array());
213 type_to_match = type_to_match->fields.array;
214 }
215
216 if (type_to_match != output->type) {
217 /* There is a bit of a special case for gl_TexCoord. This
218 * built-in is unsized by default. Applications that access it with a
219 * variable index must redeclare it with a size. There is some
220 * language in the GLSL spec that implies the fragment shader
221 * and vertex shader do not have to agree on this size. Other
222 * drivers behave this way, and one or two applications seem to
223 * rely on it.
224 *
225 * Neither declaration needs to be modified here because the array
226 * sizes are fixed later when update_array_sizes is called.
227 *
228 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
229 *
230 * "Unlike user-defined varying variables, the built-in
231 * varying variables don't have a strict one-to-one
232 * correspondence between the vertex language and the
233 * fragment language."
234 */
235 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
236 linker_error(prog,
237 "%s shader output `%s' declared as type `%s', "
238 "but %s shader input declared as type `%s'\n",
239 _mesa_shader_stage_to_string(producer_stage),
240 output->name,
241 output->type->name,
242 _mesa_shader_stage_to_string(consumer_stage),
243 input->type->name);
244 return;
245 }
246 }
247
248 /* Check that all of the qualifiers match between stages.
249 */
250
251 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
252 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
253 * conformance test suite does not verify that the qualifiers must match.
254 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
255 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
256 */
257 if (false /* always skip the centroid check */ &&
258 prog->data->Version < (prog->IsES ? 310 : 430) &&
259 input->data.centroid != output->data.centroid) {
260 linker_error(prog,
261 "%s shader output `%s' %s centroid qualifier, "
262 "but %s shader input %s centroid qualifier\n",
263 _mesa_shader_stage_to_string(producer_stage),
264 output->name,
265 (output->data.centroid) ? "has" : "lacks",
266 _mesa_shader_stage_to_string(consumer_stage),
267 (input->data.centroid) ? "has" : "lacks");
268 return;
269 }
270
271 if (input->data.sample != output->data.sample) {
272 linker_error(prog,
273 "%s shader output `%s' %s sample qualifier, "
274 "but %s shader input %s sample qualifier\n",
275 _mesa_shader_stage_to_string(producer_stage),
276 output->name,
277 (output->data.sample) ? "has" : "lacks",
278 _mesa_shader_stage_to_string(consumer_stage),
279 (input->data.sample) ? "has" : "lacks");
280 return;
281 }
282
283 if (input->data.patch != output->data.patch) {
284 linker_error(prog,
285 "%s shader output `%s' %s patch qualifier, "
286 "but %s shader input %s patch qualifier\n",
287 _mesa_shader_stage_to_string(producer_stage),
288 output->name,
289 (output->data.patch) ? "has" : "lacks",
290 _mesa_shader_stage_to_string(consumer_stage),
291 (input->data.patch) ? "has" : "lacks");
292 return;
293 }
294
295 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
296 *
297 * "As only outputs need be declared with invariant, an output from
298 * one shader stage will still match an input of a subsequent stage
299 * without the input being declared as invariant."
300 *
301 * while GLSL 4.20 says:
302 *
303 * "For variables leaving one shader and coming into another shader,
304 * the invariant keyword has to be used in both shaders, or a link
305 * error will result."
306 *
307 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
308 *
309 * "The invariance of varyings that are declared in both the vertex
310 * and fragment shaders must match."
311 */
312 if (input->data.invariant != output->data.invariant &&
313 prog->data->Version < (prog->IsES ? 300 : 430)) {
314 linker_error(prog,
315 "%s shader output `%s' %s invariant qualifier, "
316 "but %s shader input %s invariant qualifier\n",
317 _mesa_shader_stage_to_string(producer_stage),
318 output->name,
319 (output->data.invariant) ? "has" : "lacks",
320 _mesa_shader_stage_to_string(consumer_stage),
321 (input->data.invariant) ? "has" : "lacks");
322 return;
323 }
324
325 /* GLSL >= 4.40 removes text requiring interpolation qualifiers
326 * to match cross stage, they must only match within the same stage.
327 *
328 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
329 *
330 * "It is a link-time error if, within the same stage, the interpolation
331 * qualifiers of variables of the same name do not match."
332 *
333 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
334 *
335 * "When no interpolation qualifier is present, smooth interpolation
336 * is used."
337 *
338 * So we match variables where one is smooth and the other has no explicit
339 * qualifier.
340 */
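/* Illustrative GLSL, not from this file: under this rule an ES vertex
 * shader output declared as
 *   out vec4 color;          // no qualifier, implicitly smooth
 * links cleanly against a fragment shader input declared as
 *   smooth in vec4 color;
 */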
341 unsigned input_interpolation = input->data.interpolation;
342 unsigned output_interpolation = output->data.interpolation;
343 if (prog->IsES) {
344 if (input_interpolation == INTERP_MODE_NONE)
345 input_interpolation = INTERP_MODE_SMOOTH;
346 if (output_interpolation == INTERP_MODE_NONE)
347 output_interpolation = INTERP_MODE_SMOOTH;
348 }
349 if (input_interpolation != output_interpolation &&
350 prog->data->Version < 440) {
351 if (!ctx->Const.AllowGLSLCrossStageInterpolationMismatch) {
352 linker_error(prog,
353 "%s shader output `%s' specifies %s "
354 "interpolation qualifier, "
355 "but %s shader input specifies %s "
356 "interpolation qualifier\n",
357 _mesa_shader_stage_to_string(producer_stage),
358 output->name,
359 interpolation_string(output->data.interpolation),
360 _mesa_shader_stage_to_string(consumer_stage),
361 interpolation_string(input->data.interpolation));
362 return;
363 } else {
364 linker_warning(prog,
365 "%s shader output `%s' specifies %s "
366 "interpolation qualifier, "
367 "but %s shader input specifies %s "
368 "interpolation qualifier\n",
369 _mesa_shader_stage_to_string(producer_stage),
370 output->name,
371 interpolation_string(output->data.interpolation),
372 _mesa_shader_stage_to_string(consumer_stage),
373 interpolation_string(input->data.interpolation));
374 }
375 }
376 }
377
378 /**
379 * Validate front and back color outputs against single color input
380 */
381 static void
382 cross_validate_front_and_back_color(struct gl_context *ctx,
383 struct gl_shader_program *prog,
384 const ir_variable *input,
385 const ir_variable *front_color,
386 const ir_variable *back_color,
387 gl_shader_stage consumer_stage,
388 gl_shader_stage producer_stage)
389 {
390 if (front_color != NULL && front_color->data.assigned)
391 cross_validate_types_and_qualifiers(ctx, prog, input, front_color,
392 consumer_stage, producer_stage);
393
394 if (back_color != NULL && back_color->data.assigned)
395 cross_validate_types_and_qualifiers(ctx, prog, input, back_color,
396 consumer_stage, producer_stage);
397 }
398
399 static unsigned
400 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
401 {
402 unsigned location_start = VARYING_SLOT_VAR0;
403
404 switch (stage) {
405 case MESA_SHADER_VERTEX:
406 if (var->data.mode == ir_var_shader_in)
407 location_start = VERT_ATTRIB_GENERIC0;
408 break;
409 case MESA_SHADER_TESS_CTRL:
410 case MESA_SHADER_TESS_EVAL:
411 if (var->data.patch)
412 location_start = VARYING_SLOT_PATCH0;
413 break;
414 case MESA_SHADER_FRAGMENT:
415 if (var->data.mode == ir_var_shader_out)
416 location_start = FRAG_RESULT_DATA0;
417 break;
418 default:
419 break;
420 }
421
422 return var->data.location - location_start;
423 }
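/* Worked example, for illustration only: a fragment shader output carrying
 * layout(location = 2) has var->data.location == FRAG_RESULT_DATA0 + 2, so
 * this returns slot 2; a per-patch tessellation output at
 * VARYING_SLOT_PATCH0 + 1 likewise returns slot 1.
 */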
424
425 struct explicit_location_info {
426 ir_variable *var;
427 unsigned numerical_type;
428 unsigned interpolation;
429 bool centroid;
430 bool sample;
431 bool patch;
432 };
433
434 static inline unsigned
435 get_numerical_type(const glsl_type *type)
436 {
437 /* From the OpenGL 4.6 spec, section 4.4.1 Input Layout Qualifiers, Page 68,
438 * (Location aliasing):
439 *
440 * "Further, when location aliasing, the aliases sharing the location
441 * must have the same underlying numerical type (floating-point or
442 * integer)."
443 */
444 if (type->is_float() || type->is_double())
445 return GLSL_TYPE_FLOAT;
446 return GLSL_TYPE_INT;
447 }
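/* Illustrative GLSL, not from this file: these outputs may not share a
 * location because their underlying numerical types differ:
 *   layout(location = 0, component = 0) out vec2  a;  // floating-point
 *   layout(location = 0, component = 2) out ivec2 b;  // integer
 * check_location_aliasing() below reports a link error for this case.
 */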
448
449 static bool
450 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
451 ir_variable *var,
452 unsigned location,
453 unsigned component,
454 unsigned location_limit,
455 const glsl_type *type,
456 unsigned interpolation,
457 bool centroid,
458 bool sample,
459 bool patch,
460 gl_shader_program *prog,
461 gl_shader_stage stage)
462 {
463 unsigned last_comp;
464 if (type->without_array()->is_struct()) {
465 /* The component qualifier can't be used on structs so just treat
466 * all component slots as used.
467 */
468 last_comp = 4;
469 } else {
470 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
471 last_comp = component + type->without_array()->vector_elements * dmul;
472 }
473
474 while (location < location_limit) {
475 unsigned comp = 0;
476 while (comp < 4) {
477 struct explicit_location_info *info =
478 &explicit_locations[location][comp];
479
480 if (info->var) {
481 /* Component aliasing is not allowed */
482 if (comp >= component && comp < last_comp) {
483 linker_error(prog,
484 "%s shader has multiple %sputs explicitly "
485 "assigned to location %d and component %d\n",
486 _mesa_shader_stage_to_string(stage),
487 var->data.mode == ir_var_shader_in ? "in" : "out",
488 location, comp);
489 return false;
490 } else {
491 /* For all other used components we need to have matching
492 * types, interpolation and auxiliary storage
493 */
494 if (info->numerical_type !=
495 get_numerical_type(type->without_array())) {
496 linker_error(prog,
497 "Varyings sharing the same location must "
498 "have the same underlying numerical type. "
499 "Location %u component %u\n",
500 location, comp);
501 return false;
502 }
503
504 if (info->interpolation != interpolation) {
505 linker_error(prog,
506 "%s shader has multiple %sputs at explicit "
507 "location %u with different interpolation "
508 "settings\n",
509 _mesa_shader_stage_to_string(stage),
510 var->data.mode == ir_var_shader_in ?
511 "in" : "out", location);
512 return false;
513 }
514
515 if (info->centroid != centroid ||
516 info->sample != sample ||
517 info->patch != patch) {
518 linker_error(prog,
519 "%s shader has multiple %sputs at explicit "
520 "location %u with different aux storage\n",
521 _mesa_shader_stage_to_string(stage),
522 var->data.mode == ir_var_shader_in ?
523 "in" : "out", location);
524 return false;
525 }
526 }
527 } else if (comp >= component && comp < last_comp) {
528 info->var = var;
529 info->numerical_type = get_numerical_type(type->without_array());
530 info->interpolation = interpolation;
531 info->centroid = centroid;
532 info->sample = sample;
533 info->patch = patch;
534 }
535
536 comp++;
537
538 /* We need to do some special handling for doubles as dvec3 and
539 * dvec4 consume two consecutive locations. We don't need to
540 * worry about components beginning at anything other than 0 as
541 * the spec does not allow this for dvec3 and dvec4.
542 */
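/* For instance (illustration only): a dvec4 at location N has
 * last_comp == 8, so it also claims all four components of location N + 1.
 */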
543 if (comp == 4 && last_comp > 4) {
544 last_comp = last_comp - 4;
545 /* Bump location index and reset the component index */
546 location++;
547 comp = 0;
548 component = 0;
549 }
550 }
551
552 location++;
553 }
554
555 return true;
556 }
557
558 static bool
559 validate_explicit_variable_location(struct gl_context *ctx,
560 struct explicit_location_info explicit_locations[][4],
561 ir_variable *var,
562 gl_shader_program *prog,
563 gl_linked_shader *sh)
564 {
565 const glsl_type *type = get_varying_type(var, sh->Stage);
566 unsigned num_elements = type->count_attribute_slots(false);
567 unsigned idx = compute_variable_location_slot(var, sh->Stage);
568 unsigned slot_limit = idx + num_elements;
569
570 /* Vertex shader inputs and fragment shader outputs are validated in
571 * assign_attribute_or_color_locations() so we should not attempt to
572 * validate them again here.
573 */
574 unsigned slot_max;
575 if (var->data.mode == ir_var_shader_out) {
576 assert(sh->Stage != MESA_SHADER_FRAGMENT);
577 slot_max =
578 ctx->Const.Program[sh->Stage].MaxOutputComponents / 4;
579 } else {
580 assert(var->data.mode == ir_var_shader_in);
581 assert(sh->Stage != MESA_SHADER_VERTEX);
582 slot_max =
583 ctx->Const.Program[sh->Stage].MaxInputComponents / 4;
584 }
585
586 if (slot_limit > slot_max) {
587 linker_error(prog,
588 "Invalid location %u in %s shader\n",
589 idx, _mesa_shader_stage_to_string(sh->Stage));
590 return false;
591 }
592
593 const glsl_type *type_without_array = type->without_array();
594 if (type_without_array->is_interface()) {
595 for (unsigned i = 0; i < type_without_array->length; i++) {
596 glsl_struct_field *field = &type_without_array->fields.structure[i];
597 unsigned field_location = field->location -
598 (field->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
599 if (!check_location_aliasing(explicit_locations, var,
600 field_location,
601 0, field_location + 1,
602 field->type,
603 field->interpolation,
604 field->centroid,
605 field->sample,
606 field->patch,
607 prog, sh->Stage)) {
608 return false;
609 }
610 }
611 } else if (!check_location_aliasing(explicit_locations, var,
612 idx, var->data.location_frac,
613 slot_limit, type,
614 var->data.interpolation,
615 var->data.centroid,
616 var->data.sample,
617 var->data.patch,
618 prog, sh->Stage)) {
619 return false;
620 }
621
622 return true;
623 }
624
625 /**
626 * Validate explicit locations for the inputs to the first stage and the
627 * outputs of the last stage in an SSO program (everything in between is
628 * validated in cross_validate_outputs_to_inputs).
629 */
630 void
631 validate_sso_explicit_locations(struct gl_context *ctx,
632 struct gl_shader_program *prog,
633 gl_shader_stage first_stage,
634 gl_shader_stage last_stage)
635 {
636 assert(prog->SeparateShader);
637
638 /* VS inputs and FS outputs are validated in
639 * assign_attribute_or_color_locations()
640 */
641 bool validate_first_stage = first_stage != MESA_SHADER_VERTEX;
642 bool validate_last_stage = last_stage != MESA_SHADER_FRAGMENT;
643 if (!validate_first_stage && !validate_last_stage)
644 return;
645
646 struct explicit_location_info explicit_locations[MAX_VARYING][4];
647
648 gl_shader_stage stages[2] = { first_stage, last_stage };
649 bool validate_stage[2] = { validate_first_stage, validate_last_stage };
650 ir_variable_mode var_direction[2] = { ir_var_shader_in, ir_var_shader_out };
651
652 for (unsigned i = 0; i < 2; i++) {
653 if (!validate_stage[i])
654 continue;
655
656 gl_shader_stage stage = stages[i];
657
658 gl_linked_shader *sh = prog->_LinkedShaders[stage];
659 assert(sh);
660
661 memset(explicit_locations, 0, sizeof(explicit_locations));
662
663 foreach_in_list(ir_instruction, node, sh->ir) {
664 ir_variable *const var = node->as_variable();
665
666 if (var == NULL ||
667 !var->data.explicit_location ||
668 var->data.location < VARYING_SLOT_VAR0 ||
669 var->data.mode != var_direction[i])
670 continue;
671
672 if (!validate_explicit_variable_location(
673 ctx, explicit_locations, var, prog, sh)) {
674 return;
675 }
676 }
677 }
678 }
679
680 /**
681 * Validate that outputs from one stage match inputs of another
682 */
683 void
684 cross_validate_outputs_to_inputs(struct gl_context *ctx,
685 struct gl_shader_program *prog,
686 gl_linked_shader *producer,
687 gl_linked_shader *consumer)
688 {
689 glsl_symbol_table parameters;
690 struct explicit_location_info output_explicit_locations[MAX_VARYING][4] = { 0 };
691 struct explicit_location_info input_explicit_locations[MAX_VARYING][4] = { 0 };
692
693 /* Find all shader outputs in the "producer" stage.
694 */
695 foreach_in_list(ir_instruction, node, producer->ir) {
696 ir_variable *const var = node->as_variable();
697
698 if (var == NULL || var->data.mode != ir_var_shader_out)
699 continue;
700
701 if (!var->data.explicit_location
702 || var->data.location < VARYING_SLOT_VAR0)
703 parameters.add_variable(var);
704 else {
705 /* User-defined varyings with explicit locations are handled
706 * differently because they do not need to have matching names.
707 */
708 if (!validate_explicit_variable_location(ctx,
709 output_explicit_locations,
710 var, prog, producer)) {
711 return;
712 }
713 }
714 }
715
716
717 /* Find all shader inputs in the "consumer" stage. Any variables that have
718 * matching outputs already in the symbol table must have the same type and
719 * qualifiers.
720 *
721 * Exception: if the consumer is the geometry shader, then the inputs
722 * should be arrays and the type of the array element should match the type
723 * of the corresponding producer output.
724 */
725 foreach_in_list(ir_instruction, node, consumer->ir) {
726 ir_variable *const input = node->as_variable();
727
728 if (input == NULL || input->data.mode != ir_var_shader_in)
729 continue;
730
731 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
732 const ir_variable *const front_color =
733 parameters.get_variable("gl_FrontColor");
734
735 const ir_variable *const back_color =
736 parameters.get_variable("gl_BackColor");
737
738 cross_validate_front_and_back_color(ctx, prog, input,
739 front_color, back_color,
740 consumer->Stage, producer->Stage);
741 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
742 const ir_variable *const front_color =
743 parameters.get_variable("gl_FrontSecondaryColor");
744
745 const ir_variable *const back_color =
746 parameters.get_variable("gl_BackSecondaryColor");
747
748 cross_validate_front_and_back_color(ctx, prog, input,
749 front_color, back_color,
750 consumer->Stage, producer->Stage);
751 } else {
752 /* The rules for connecting inputs and outputs change in the presence
753 * of explicit locations. In this case, we no longer care about the
754 * names of the variables. Instead, we care only about the
755 * explicitly assigned location.
756 */
757 ir_variable *output = NULL;
758 if (input->data.explicit_location
759 && input->data.location >= VARYING_SLOT_VAR0) {
760
761 const glsl_type *type = get_varying_type(input, consumer->Stage);
762 unsigned num_elements = type->count_attribute_slots(false);
763 unsigned idx =
764 compute_variable_location_slot(input, consumer->Stage);
765 unsigned slot_limit = idx + num_elements;
766
767 if (!validate_explicit_variable_location(ctx,
768 input_explicit_locations,
769 input, prog, consumer)) {
770 return;
771 }
772
773 while (idx < slot_limit) {
774 if (idx >= MAX_VARYING) {
775 linker_error(prog,
776 "Invalid location %u in %s shader\n", idx,
777 _mesa_shader_stage_to_string(consumer->Stage));
778 return;
779 }
780
781 output = output_explicit_locations[idx][input->data.location_frac].var;
782
783 if (output == NULL ||
784 input->data.location != output->data.location) {
785 linker_error(prog,
786 "%s shader input `%s' with explicit location "
787 "has no matching output\n",
788 _mesa_shader_stage_to_string(consumer->Stage),
789 input->name);
790 break;
791 }
792 idx++;
793 }
794 } else {
795 output = parameters.get_variable(input->name);
796 }
797
798 if (output != NULL) {
799 /* Interface blocks have their own validation elsewhere so don't
800 * try validating them here.
801 */
802 if (!(input->get_interface_type() &&
803 output->get_interface_type()))
804 cross_validate_types_and_qualifiers(ctx, prog, input, output,
805 consumer->Stage,
806 producer->Stage);
807 } else {
808 /* Check for input vars with unmatched output vars in prev stage
809 * taking into account that interface blocks could have a matching
810 * output but with a different name, so we ignore them.
811 */
812 assert(!input->data.assigned);
813 if (input->data.used && !input->get_interface_type() &&
814 !input->data.explicit_location && !prog->SeparateShader)
815 linker_error(prog,
816 "%s shader input `%s' "
817 "has no matching output in the previous stage\n",
818 _mesa_shader_stage_to_string(consumer->Stage),
819 input->name);
820 }
821 }
822 }
823 }
824
825 /**
826 * Demote shader inputs and outputs that are not used in other stages, and
827 * remove them via dead code elimination.
828 */
829 static void
830 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
831 gl_linked_shader *sh,
832 enum ir_variable_mode mode)
833 {
834 if (is_separate_shader_object)
835 return;
836
837 foreach_in_list(ir_instruction, node, sh->ir) {
838 ir_variable *const var = node->as_variable();
839
840 if (var == NULL || var->data.mode != int(mode))
841 continue;
842
843 /* A shader 'in' or 'out' variable is only really an input or output if
844 * its value is used by other shader stages. This will cause the
845 * variable to have a location assigned.
846 */
847 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
848 assert(var->data.mode != ir_var_temporary);
849
850 /* Assign zeros to demoted inputs to allow more optimizations. */
851 if (var->data.mode == ir_var_shader_in && !var->constant_value)
852 var->constant_value = ir_constant::zero(var, var->type);
853
854 var->data.mode = ir_var_auto;
855 }
856 }
857
858 /* Eliminate code that is now dead due to unused inputs/outputs being
859 * demoted.
860 */
861 while (do_dead_code(sh->ir, false))
862 ;
863
864 }
865
866 /**
867 * Initialize this object based on a string that was passed to
868 * glTransformFeedbackVaryings.
869 *
870 * If the input is mal-formed, this call still succeeds, but it sets
871 * this->var_name to a mal-formed input, so tfeedback_decl::find_output_var()
872 * will fail to find any matching variable.
873 */
874 void
875 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
876 const char *input)
877 {
878 /* We don't have to be pedantic about what is a valid GLSL variable name,
879 * because any variable with an invalid name can't exist in the IR anyway.
880 */
881
882 this->location = -1;
883 this->orig_name = input;
884 this->lowered_builtin_array_variable = none;
885 this->skip_components = 0;
886 this->next_buffer_separator = false;
887 this->matched_candidate = NULL;
888 this->stream_id = 0;
889 this->buffer = 0;
890 this->offset = 0;
891
892 if (ctx->Extensions.ARB_transform_feedback3) {
893 /* Parse gl_NextBuffer. */
894 if (strcmp(input, "gl_NextBuffer") == 0) {
895 this->next_buffer_separator = true;
896 return;
897 }
898
899 /* Parse gl_SkipComponents. */
900 if (strcmp(input, "gl_SkipComponents1") == 0)
901 this->skip_components = 1;
902 else if (strcmp(input, "gl_SkipComponents2") == 0)
903 this->skip_components = 2;
904 else if (strcmp(input, "gl_SkipComponents3") == 0)
905 this->skip_components = 3;
906 else if (strcmp(input, "gl_SkipComponents4") == 0)
907 this->skip_components = 4;
908
909 if (this->skip_components)
910 return;
911 }
912
913 /* Parse a declaration. */
914 const char *base_name_end;
915 long subscript = parse_program_resource_name(input, &base_name_end);
916 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
917 if (this->var_name == NULL) {
918 _mesa_error_no_memory(__func__);
919 return;
920 }
921
922 if (subscript >= 0) {
923 this->array_subscript = subscript;
924 this->is_subscripted = true;
925 } else {
926 this->is_subscripted = false;
927 }
928
929 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
930 * class must behave specially to account for the fact that gl_ClipDistance
931 * is converted from a float[8] to a vec4[2].
932 */
933 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
934 strcmp(this->var_name, "gl_ClipDistance") == 0) {
935 this->lowered_builtin_array_variable = clip_distance;
936 }
937 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
938 strcmp(this->var_name, "gl_CullDistance") == 0) {
939 this->lowered_builtin_array_variable = cull_distance;
940 }
941
942 if (ctx->Const.LowerTessLevel &&
943 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
944 this->lowered_builtin_array_variable = tess_level_outer;
945 if (ctx->Const.LowerTessLevel &&
946 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
947 this->lowered_builtin_array_variable = tess_level_inner;
948 }
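/* Illustrative API usage, not from this file, showing the strings this
 * parser accepts:
 *   const GLchar *names[] = { "outPos", "gl_SkipComponents2",
 *                             "outColor[1]", "gl_NextBuffer", "outNormal" };
 *   glTransformFeedbackVaryings(prog, 5, names, GL_INTERLEAVED_ATTRIBS);
 * "outColor[1]" is split into var_name "outColor" with array_subscript 1.
 */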
949
950
951 /**
952 * Determine whether two tfeedback_decl objects refer to the same variable and
953 * array index (if applicable).
954 */
955 bool
956 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
957 {
958 assert(x.is_varying() && y.is_varying());
959
960 if (strcmp(x.var_name, y.var_name) != 0)
961 return false;
962 if (x.is_subscripted != y.is_subscripted)
963 return false;
964 if (x.is_subscripted && x.array_subscript != y.array_subscript)
965 return false;
966 return true;
967 }
968
969
970 /**
971 * Assign a location and stream ID for this tfeedback_decl object based on the
972 * transform feedback candidate found by find_candidate.
973 *
974 * If an error occurs, the error is reported through linker_error() and false
975 * is returned.
976 */
977 bool
978 tfeedback_decl::assign_location(struct gl_context *ctx,
979 struct gl_shader_program *prog)
980 {
981 assert(this->is_varying());
982
983 unsigned fine_location
984 = this->matched_candidate->toplevel_var->data.location * 4
985 + this->matched_candidate->toplevel_var->data.location_frac
986 + this->matched_candidate->offset;
987 const unsigned dmul =
988 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
989
990 if (this->matched_candidate->type->is_array()) {
991 /* Array variable */
992 const unsigned matrix_cols =
993 this->matched_candidate->type->fields.array->matrix_columns;
994 const unsigned vector_elements =
995 this->matched_candidate->type->fields.array->vector_elements;
996 unsigned actual_array_size;
997 switch (this->lowered_builtin_array_variable) {
998 case clip_distance:
999 actual_array_size = prog->last_vert_prog ?
1000 prog->last_vert_prog->info.clip_distance_array_size : 0;
1001 break;
1002 case cull_distance:
1003 actual_array_size = prog->last_vert_prog ?
1004 prog->last_vert_prog->info.cull_distance_array_size : 0;
1005 break;
1006 case tess_level_outer:
1007 actual_array_size = 4;
1008 break;
1009 case tess_level_inner:
1010 actual_array_size = 2;
1011 break;
1012 case none:
1013 default:
1014 actual_array_size = this->matched_candidate->type->array_size();
1015 break;
1016 }
1017
1018 if (this->is_subscripted) {
1019 /* Check array bounds. */
1020 if (this->array_subscript >= actual_array_size) {
1021 linker_error(prog, "Transform feedback varying %s has index "
1022 "%i, but the array size is %u.",
1023 this->orig_name, this->array_subscript,
1024 actual_array_size);
1025 return false;
1026 }
1027 unsigned array_elem_size = this->lowered_builtin_array_variable ?
1028 1 : vector_elements * matrix_cols * dmul;
1029 fine_location += array_elem_size * this->array_subscript;
1030 this->size = 1;
1031 } else {
1032 this->size = actual_array_size;
1033 }
1034 this->vector_elements = vector_elements;
1035 this->matrix_columns = matrix_cols;
1036 if (this->lowered_builtin_array_variable)
1037 this->type = GL_FLOAT;
1038 else
1039 this->type = this->matched_candidate->type->fields.array->gl_type;
1040 } else {
1041 /* Regular variable (scalar, vector, or matrix) */
1042 if (this->is_subscripted) {
1043 linker_error(prog, "Transform feedback varying %s requested, "
1044 "but %s is not an array.",
1045 this->orig_name, this->var_name);
1046 return false;
1047 }
1048 this->size = 1;
1049 this->vector_elements = this->matched_candidate->type->vector_elements;
1050 this->matrix_columns = this->matched_candidate->type->matrix_columns;
1051 this->type = this->matched_candidate->type->gl_type;
1052 }
1053 this->location = fine_location / 4;
1054 this->location_frac = fine_location % 4;
1055
1056 /* From GL_EXT_transform_feedback:
1057 * A program will fail to link if:
1058 *
1059 * * the total number of components to capture in any varying
1060 * variable in <varyings> is greater than the constant
1061 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
1062 * buffer mode is SEPARATE_ATTRIBS_EXT;
1063 */
1064 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1065 this->num_components() >
1066 ctx->Const.MaxTransformFeedbackSeparateComponents) {
1067 linker_error(prog, "Transform feedback varying %s exceeds "
1068 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
1069 this->orig_name);
1070 return false;
1071 }
1072
1073 /* Only transform feedback varyings can be assigned to non-zero streams,
1074 * so assign the stream id here.
1075 */
1076 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
1077
1078 unsigned array_offset = this->array_subscript * 4 * dmul;
1079 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
1080 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
1081 this->offset = this->matched_candidate->toplevel_var->data.offset +
1082 array_offset + struct_offset;
1083
1084 return true;
1085 }
1086
1087
1088 unsigned
1089 tfeedback_decl::get_num_outputs() const
1090 {
1091 if (!this->is_varying()) {
1092 return 0;
1093 }
1094 return (this->num_components() + this->location_frac + 3)/4;
1095 }
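/* Worked example, for illustration only: a varying with 6 components
 * starting at location_frac 2 fills components 2..3 of one slot and 0..3 of
 * the next, and (6 + 2 + 3) / 4 == 2 outputs are reported accordingly.
 */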
1096
1097
1098 /**
1099 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
1100 *
1101 * If an error occurs, the error is reported through linker_error() and false
1102 * is returned.
1103 */
1104 bool
1105 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
1106 struct gl_transform_feedback_info *info,
1107 unsigned buffer, unsigned buffer_index,
1108 const unsigned max_outputs, bool *explicit_stride,
1109 bool has_xfb_qualifiers) const
1110 {
1111 unsigned xfb_offset = 0;
1112 unsigned size = this->size;
1113 /* Handle gl_SkipComponents. */
1114 if (this->skip_components) {
1115 info->Buffers[buffer].Stride += this->skip_components;
1116 size = this->skip_components;
1117 goto store_varying;
1118 }
1119
1120 if (this->next_buffer_separator) {
1121 size = 0;
1122 goto store_varying;
1123 }
1124
1125 if (has_xfb_qualifiers) {
1126 xfb_offset = this->offset / 4;
1127 } else {
1128 xfb_offset = info->Buffers[buffer].Stride;
1129 }
1130 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
1131
1132 {
1133 unsigned location = this->location;
1134 unsigned location_frac = this->location_frac;
1135 unsigned num_components = this->num_components();
1136 while (num_components > 0) {
1137 unsigned output_size = MIN2(num_components, 4 - location_frac);
1138 assert((info->NumOutputs == 0 && max_outputs == 0) ||
1139 info->NumOutputs < max_outputs);
1140
1141 /* From the ARB_enhanced_layouts spec:
1142 *
1143 * "If such a block member or variable is not written during a shader
1144 * invocation, the buffer contents at the assigned offset will be
1145 * undefined. Even if there are no static writes to a variable or
1146 * member that is assigned a transform feedback offset, the space is
1147 * still allocated in the buffer and still affects the stride."
1148 */
1149 if (this->is_varying_written()) {
1150 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
1151 info->Outputs[info->NumOutputs].OutputRegister = location;
1152 info->Outputs[info->NumOutputs].NumComponents = output_size;
1153 info->Outputs[info->NumOutputs].StreamId = stream_id;
1154 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
1155 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
1156 ++info->NumOutputs;
1157 }
1158 info->Buffers[buffer].Stream = this->stream_id;
1159 xfb_offset += output_size;
1160
1161 num_components -= output_size;
1162 location++;
1163 location_frac = 0;
1164 }
1165 }
1166
1167 if (explicit_stride && explicit_stride[buffer]) {
1168 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
1169 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1170 "multiple of 8 as its applied to a type that is or "
1171 "contains a double.",
1172 info->Buffers[buffer].Stride * 4);
1173 return false;
1174 }
1175
1176 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
1177 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
1178 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1179 "buffer (%d)", xfb_offset * 4,
1180 info->Buffers[buffer].Stride * 4, buffer);
1181 return false;
1182 }
1183 } else {
1184 info->Buffers[buffer].Stride = xfb_offset;
1185 }
1186
1187 /* From GL_EXT_transform_feedback:
1188 * A program will fail to link if:
1189 *
1190 * * the total number of components to capture is greater than
1191 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1192 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
1193 *
1194 * From GL_ARB_enhanced_layouts:
1195 *
1196 * "The resulting stride (implicit or explicit) must be less than or
1197 * equal to the implementation-dependent constant
1198 * gl_MaxTransformFeedbackInterleavedComponents."
1199 */
1200 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1201 has_xfb_qualifiers) &&
1202 info->Buffers[buffer].Stride >
1203 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1204 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1205 "limit has been exceeded.");
1206 return false;
1207 }
1208
1209 store_varying:
1210 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1211 this->orig_name);
1212 info->Varyings[info->NumVarying].Type = this->type;
1213 info->Varyings[info->NumVarying].Size = size;
1214 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1215 info->NumVarying++;
1216 info->Buffers[buffer].NumVaryings++;
1217
1218 return true;
1219 }
1220
1221
1222 const tfeedback_candidate *
1223 tfeedback_decl::find_candidate(gl_shader_program *prog,
1224 hash_table *tfeedback_candidates)
1225 {
1226 const char *name = this->var_name;
1227 switch (this->lowered_builtin_array_variable) {
1228 case none:
1229 name = this->var_name;
1230 break;
1231 case clip_distance:
1232 name = "gl_ClipDistanceMESA";
1233 break;
1234 case cull_distance:
1235 name = "gl_CullDistanceMESA";
1236 break;
1237 case tess_level_outer:
1238 name = "gl_TessLevelOuterMESA";
1239 break;
1240 case tess_level_inner:
1241 name = "gl_TessLevelInnerMESA";
1242 break;
1243 }
1244 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1245
1246 this->matched_candidate = entry ?
1247 (const tfeedback_candidate *) entry->data : NULL;
1248
1249 if (!this->matched_candidate) {
1250 /* From GL_EXT_transform_feedback:
1251 * A program will fail to link if:
1252 *
1253 * * any variable name specified in the <varyings> array is not
1254 * declared as an output in the geometry shader (if present) or
1255 * the vertex shader (if no geometry shader is present);
1256 */
1257 linker_error(prog, "Transform feedback varying %s undeclared.",
1258 this->orig_name);
1259 }
1260
1261 return this->matched_candidate;
1262 }
1263
1264
1265 /**
1266 * Parse all the transform feedback declarations that were passed to
1267 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1268 *
1269 * If an error occurs, the error is reported through linker_error() and false
1270 * is returned.
1271 */
1272 static bool
1273 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1274 const void *mem_ctx, unsigned num_names,
1275 char **varying_names, tfeedback_decl *decls)
1276 {
1277 for (unsigned i = 0; i < num_names; ++i) {
1278 decls[i].init(ctx, mem_ctx, varying_names[i]);
1279
1280 if (!decls[i].is_varying())
1281 continue;
1282
1283 /* From GL_EXT_transform_feedback:
1284 * A program will fail to link if:
1285 *
1286 * * any two entries in the <varyings> array specify the same varying
1287 * variable;
1288 *
1289 * We interpret this to mean "any two entries in the <varyings> array
1290 * specify the same varying variable and array index", since transform
1291 * feedback of arrays would be useless otherwise.
1292 */
1293 for (unsigned j = 0; j < i; ++j) {
1294 if (decls[j].is_varying()) {
1295 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1296 linker_error(prog, "Transform feedback varying %s specified "
1297 "more than once.", varying_names[i]);
1298 return false;
1299 }
1300 }
1301 }
1302 }
1303 return true;
1304 }
1305
1306
1307 static int
1308 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1309 {
1310 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1311 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1312
1313 if (x->get_buffer() != y->get_buffer())
1314 return x->get_buffer() - y->get_buffer();
1315 return x->get_offset() - y->get_offset();
1316 }
1317
1318 /**
1319 * Store transform feedback location assignments into
1320 * prog->sh.LinkedTransformFeedback based on the data stored in
1321 * tfeedback_decls.
1322 *
1323 * If an error occurs, the error is reported through linker_error() and false
1324 * is returned.
1325 */
1326 static bool
1327 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1328 unsigned num_tfeedback_decls,
1329 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1330 {
1331 if (!prog->last_vert_prog)
1332 return true;
1333
1334 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1335 * tracking the number of buffers doesn't overflow.
1336 */
1337 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1338
1339 bool separate_attribs_mode =
1340 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1341
1342 struct gl_program *xfb_prog = prog->last_vert_prog;
1343 xfb_prog->sh.LinkedTransformFeedback =
1344 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1345
1346 /* The xfb_offset qualifier does not have to be used in increasing order;
1347 * however, some drivers expect to receive the list of transform feedback
1348 * declarations in order, so sort it now for convenience.
1349 */
1350 if (has_xfb_qualifiers) {
1351 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1352 cmp_xfb_offset);
1353 }
1354
1355 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1356 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1357 num_tfeedback_decls);
1358
1359 unsigned num_outputs = 0;
1360 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1361 if (tfeedback_decls[i].is_varying_written())
1362 num_outputs += tfeedback_decls[i].get_num_outputs();
1363 }
1364
1365 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1366 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1367 num_outputs);
1368
1369 unsigned num_buffers = 0;
1370 unsigned buffers = 0;
1371
1372 if (!has_xfb_qualifiers && separate_attribs_mode) {
1373 /* GL_SEPARATE_ATTRIBS */
1374 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1375 if (!tfeedback_decls[i].store(ctx, prog,
1376 xfb_prog->sh.LinkedTransformFeedback,
1377 num_buffers, num_buffers, num_outputs,
1378 NULL, has_xfb_qualifiers))
1379 return false;
1380
1381 buffers |= 1 << num_buffers;
1382 num_buffers++;
1383 }
1384 }
1385 else {
1386 /* GL_INTERLEAVED_ATTRIBS */
1387 int buffer_stream_id = -1;
1388 unsigned buffer =
1389 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1390 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1391
1392 /* Apply any xfb_stride global qualifiers */
1393 if (has_xfb_qualifiers) {
1394 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1395 if (prog->TransformFeedback.BufferStride[j]) {
1396 explicit_stride[j] = true;
1397 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1398 prog->TransformFeedback.BufferStride[j] / 4;
1399 }
1400 }
1401 }
1402
1403 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1404 if (has_xfb_qualifiers &&
1405 buffer != tfeedback_decls[i].get_buffer()) {
1406 /* we have moved to the next buffer so reset stream id */
1407 buffer_stream_id = -1;
1408 num_buffers++;
1409 }
1410
1411 if (tfeedback_decls[i].is_next_buffer_separator()) {
1412 if (!tfeedback_decls[i].store(ctx, prog,
1413 xfb_prog->sh.LinkedTransformFeedback,
1414 buffer, num_buffers, num_outputs,
1415 explicit_stride, has_xfb_qualifiers))
1416 return false;
1417 num_buffers++;
1418 buffer_stream_id = -1;
1419 continue;
1420 }
1421
1422 if (has_xfb_qualifiers) {
1423 buffer = tfeedback_decls[i].get_buffer();
1424 } else {
1425 buffer = num_buffers;
1426 }
1427
1428 if (tfeedback_decls[i].is_varying()) {
1429 if (buffer_stream_id == -1) {
1430 /* First varying writing to this buffer: remember its stream */
1431 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1432
1433 /* Only mark a buffer as active when there is a varying
1434 * attached to it. This behaviour is based on a revised version
1435 * of section 13.2.2 of the GL 4.6 spec.
1436 */
1437 buffers |= 1 << buffer;
1438 } else if (buffer_stream_id !=
1439 (int) tfeedback_decls[i].get_stream_id()) {
1440 /* Varying writes to the same buffer from a different stream */
1441 linker_error(prog,
1442 "Transform feedback can't capture varyings belonging "
1443 "to different vertex streams in a single buffer. "
1444 "Varying %s writes to buffer from stream %u, other "
1445 "varyings in the same buffer write from stream %u.",
1446 tfeedback_decls[i].name(),
1447 tfeedback_decls[i].get_stream_id(),
1448 buffer_stream_id);
1449 return false;
1450 }
1451 }
1452
1453 if (!tfeedback_decls[i].store(ctx, prog,
1454 xfb_prog->sh.LinkedTransformFeedback,
1455 buffer, num_buffers, num_outputs,
1456 explicit_stride, has_xfb_qualifiers))
1457 return false;
1458 }
1459 }
1460
1461 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1462
1463 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1464 return true;
1465 }
1466
1467 namespace {
1468
1469 /**
1470 * Data structure recording the relationship between outputs of one shader
1471 * stage (the "producer") and inputs of another (the "consumer").
1472 */
1473 class varying_matches
1474 {
1475 public:
1476 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1477 bool enhanced_layouts_enabled,
1478 gl_shader_stage producer_stage,
1479 gl_shader_stage consumer_stage);
1480 ~varying_matches();
1481 void record(ir_variable *producer_var, ir_variable *consumer_var);
1482 unsigned assign_locations(struct gl_shader_program *prog,
1483 uint8_t components[],
1484 uint64_t reserved_slots);
1485 void store_locations() const;
1486
1487 private:
1488 bool is_varying_packing_safe(const glsl_type *type,
1489 const ir_variable *var) const;
1490
1491 /**
1492 * If true, this driver disables varying packing, so all varyings need to
1493 * be aligned on slot boundaries, and take up a number of slots equal to
1494 * their number of matrix columns times their array size.
1495 *
1496 * Packing may also be disabled because our current packing method is not
1497 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1498 * guaranteed to match across stages.
1499 */
1500 const bool disable_varying_packing;
1501
1502 /**
1503 * If true, this driver has transform feedback enabled. The transform
1504 * feedback code requires at least some packing be done even when varying
1505 * packing is disabled; fortunately, where transform feedback requires
1506 * packing it's safe to override the disabled setting. See
1507 * is_varying_packing_safe().
1508 */
1509 const bool xfb_enabled;
1510
1511 const bool enhanced_layouts_enabled;
1512
1513 /**
1514 * Enum representing the order in which varyings are packed within a
1515 * packing class.
1516 *
1517 * Currently we pack vec4's first, then vec2's, then scalar values, then
1518 * vec3's. This order ensures that the only vectors that are at risk of
1519 * having to be "double parked" (split between two adjacent varying slots)
1520 * are the vec3's.
1521 */
1522 enum packing_order_enum {
1523 PACKING_ORDER_VEC4,
1524 PACKING_ORDER_VEC2,
1525 PACKING_ORDER_SCALAR,
1526 PACKING_ORDER_VEC3,
1527 };
1528
1529 static unsigned compute_packing_class(const ir_variable *var);
1530 static packing_order_enum compute_packing_order(const ir_variable *var);
1531 static int match_comparator(const void *x_generic, const void *y_generic);
1532 static int xfb_comparator(const void *x_generic, const void *y_generic);
1533
1534 /**
1535 * Structure recording the relationship between a single producer output
1536 * and a single consumer input.
1537 */
1538 struct match {
1539 /**
1540 * Packing class for this varying, computed by compute_packing_class().
1541 */
1542 unsigned packing_class;
1543
1544 /**
1545 * Packing order for this varying, computed by compute_packing_order().
1546 */
1547 packing_order_enum packing_order;
1548 unsigned num_components;
1549
1550 /**
1551 * The output variable in the producer stage.
1552 */
1553 ir_variable *producer_var;
1554
1555 /**
1556 * The input variable in the consumer stage.
1557 */
1558 ir_variable *consumer_var;
1559
1560 /**
1561 * The location which has been assigned for this varying. This is
1562 * expressed in multiples of a float, with the first generic varying
1563 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1564 * value 0.
1565 */
1566 unsigned generic_location;
1567 } *matches;
1568
1569 /**
1570 * The number of elements in the \c matches array that are currently in
1571 * use.
1572 */
1573 unsigned num_matches;
1574
1575 /**
1576 * The number of elements that were set aside for the \c matches array when
1577 * it was allocated.
1578 */
1579 unsigned matches_capacity;
1580
1581 gl_shader_stage producer_stage;
1582 gl_shader_stage consumer_stage;
1583 };
1584
1585 } /* anonymous namespace */
1586
1587 varying_matches::varying_matches(bool disable_varying_packing,
1588 bool xfb_enabled,
1589 bool enhanced_layouts_enabled,
1590 gl_shader_stage producer_stage,
1591 gl_shader_stage consumer_stage)
1592 : disable_varying_packing(disable_varying_packing),
1593 xfb_enabled(xfb_enabled),
1594 enhanced_layouts_enabled(enhanced_layouts_enabled),
1595 producer_stage(producer_stage),
1596 consumer_stage(consumer_stage)
1597 {
1598 /* Note: this initial capacity is rather arbitrarily chosen to be large
1599 * enough for many cases without wasting an unreasonable amount of space.
1600 * varying_matches::record() will resize the array if there are more than
1601 * this number of varyings.
1602 */
1603 this->matches_capacity = 8;
1604 this->matches = (match *)
1605 malloc(sizeof(*this->matches) * this->matches_capacity);
1606 this->num_matches = 0;
1607 }
1608
1609
1610 varying_matches::~varying_matches()
1611 {
1612 free(this->matches);
1613 }
1614
1615
1616 /**
1617 * Packing is always safe on individual arrays, structures, and matrices. It
1618 * is also safe if the varying is only used for transform feedback.
1619 */
1620 bool
1621 varying_matches::is_varying_packing_safe(const glsl_type *type,
1622 const ir_variable *var) const
1623 {
1624 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1625 consumer_stage == MESA_SHADER_TESS_CTRL ||
1626 producer_stage == MESA_SHADER_TESS_CTRL)
1627 return false;
1628
1629 return xfb_enabled && (type->is_array() || type->is_struct() ||
1630 type->is_matrix() || var->data.is_xfb_only);
1631 }
1632
1633
1634 /**
1635 * Record the given producer/consumer variable pair in the list of variables
1636 * that should later be assigned locations.
1637 *
1638 * It is permissible for \c consumer_var to be NULL (this happens if a
1639 * variable is output by the producer and consumed by transform feedback, but
1640 * not consumed by the consumer).
1641 *
1642 * If \c producer_var has already been paired up with a consumer_var, or
1643 * producer_var is part of fixed pipeline functionality (and hence already has
1644 * a location assigned), this function has no effect.
1645 *
1646 * Note: as a side effect this function may change the interpolation type of
1647 * \c producer_var, but only when the change couldn't possibly affect
1648 * rendering.
1649 */
1650 void
1651 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1652 {
1653 assert(producer_var != NULL || consumer_var != NULL);
1654
1655 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1656 producer_var->data.explicit_location)) ||
1657 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1658 consumer_var->data.explicit_location))) {
1659 /* Either a location already exists for this variable (since it is part
1660 * of fixed functionality), or it has already been recorded as part of a
1661 * previous match.
1662 */
1663 return;
1664 }
1665
1666 bool needs_flat_qualifier = consumer_var == NULL &&
1667 (producer_var->type->contains_integer() ||
1668 producer_var->type->contains_double());
1669
1670 if (!disable_varying_packing &&
1671 (needs_flat_qualifier ||
1672 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1673 /* Since this varying is not being consumed by the fragment shader, its
1674 * interpolation type cannot possibly affect rendering.
1675 * Also, this variable is non-flat and is (or contains) an integer
1676 * or a double.
1677 * If the consumer stage is unknown, don't modify the interpolation
1678 * type as it could affect rendering later with separate shaders.
1679 *
1680 * lower_packed_varyings requires all integer varyings to flat,
1681 * regardless of where they appear. We can trivially satisfy that
1682 * requirement by changing the interpolation type to flat here.
1683 */
1684 if (producer_var) {
1685 producer_var->data.centroid = false;
1686 producer_var->data.sample = false;
1687 producer_var->data.interpolation = INTERP_MODE_FLAT;
1688 }
1689
1690 if (consumer_var) {
1691 consumer_var->data.centroid = false;
1692 consumer_var->data.sample = false;
1693 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1694 }
1695 }
1696
1697 if (this->num_matches == this->matches_capacity) {
1698 this->matches_capacity *= 2;
1699 this->matches = (match *)
1700 realloc(this->matches,
1701 sizeof(*this->matches) * this->matches_capacity);
1702 }
1703
1704 /* We must use the consumer to compute the packing class because in GL 4.4+
1705 * there is no guarantee interpolation qualifiers will match across stages.
1706 *
1707 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1708 *
1709 * "The type and presence of interpolation qualifiers of variables with
1710 * the same name declared in all linked shaders for the same cross-stage
1711 * interface must match, otherwise the link command will fail.
1712 *
1713 * When comparing an output from one stage to an input of a subsequent
1714 * stage, the input and output don't match if their interpolation
1715 * qualifiers (or lack thereof) are not the same."
1716 *
1717 * This text was also in at least revision 7 of the 4.40 spec but is no
1718 * longer in revision 9 and not in the 4.50 spec.
1719 */
1720 const ir_variable *const var = (consumer_var != NULL)
1721 ? consumer_var : producer_var;
1722 const gl_shader_stage stage = (consumer_var != NULL)
1723 ? consumer_stage : producer_stage;
1724 const glsl_type *type = get_varying_type(var, stage);
1725
1726 if (producer_var && consumer_var &&
1727 consumer_var->data.must_be_shader_input) {
1728 producer_var->data.must_be_shader_input = 1;
1729 }
1730
1731 this->matches[this->num_matches].packing_class
1732 = this->compute_packing_class(var);
1733 this->matches[this->num_matches].packing_order
1734 = this->compute_packing_order(var);
1735 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1736 var->data.must_be_shader_input) {
1737 unsigned slots = type->count_attribute_slots(false);
1738 this->matches[this->num_matches].num_components = slots * 4;
1739 } else {
1740 this->matches[this->num_matches].num_components
1741 = type->component_slots();
1742 }
1743
1744 this->matches[this->num_matches].producer_var = producer_var;
1745 this->matches[this->num_matches].consumer_var = consumer_var;
1746 this->num_matches++;
1747 if (producer_var)
1748 producer_var->data.is_unmatched_generic_inout = 0;
1749 if (consumer_var)
1750 consumer_var->data.is_unmatched_generic_inout = 0;
1751 }
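/* Illustrative sketch, not part of the upstream source: how varying_matches
 * is intended to be driven, mirroring assign_varying_locations() further
 * below.  The variable names are hypothetical.
 */
#if 0
static void
varying_matches_usage_sketch(struct gl_shader_program *prog,
                             varying_matches &matches,
                             ir_variable *out_var, ir_variable *in_var,
                             uint64_t reserved_slots)
{
   /* 1. Record each producer/consumer pair (the consumer may be NULL for
    *    outputs captured only by transform feedback).
    */
   matches.record(out_var, in_var);

   /* 2. Pick packed locations for everything that was recorded. */
   uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
   const unsigned slots_used =
      matches.assign_locations(prog, components, reserved_slots);
   (void) slots_used;

   /* 3. Write the chosen locations back into the ir_variables. */
   matches.store_locations();
}
#endif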
1752
1753
1754 /**
1755 * Choose locations for all of the variable matches that were previously
1756 * passed to varying_matches::record().
1757 * \param components returns array[slot] of number of components used
1758 * per slot (1, 2, 3 or 4)
1759 * \param reserved_slots bitmask indicating which varying slots are already
1760 * allocated
1761 * \return number of slots (4-element vectors) allocated
1762 */
1763 unsigned
1764 varying_matches::assign_locations(struct gl_shader_program *prog,
1765 uint8_t components[],
1766 uint64_t reserved_slots)
1767 {
1768 /* If packing has been disabled then we cannot safely sort the varyings by
1769 * class, as that may mean we are using a version of OpenGL where
1770 * interpolation qualifiers are not guaranteed to match across shaders;
1771 * sorting in this case could result in mismatching shader
1772 * interfaces.
1773 * When packing is disabled the sort orders varyings used by transform
1774 * feedback first, but it also depends on the *undefined behaviour* of qsort
1775 * to reverse the order of the varyings. See: xfb_comparator().
1776 */
1777 if (!this->disable_varying_packing) {
1778 /* Sort varying matches into an order that makes them easy to pack. */
1779 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1780 &varying_matches::match_comparator);
1781 } else {
1782 /* Only sort varyings that are only used by transform feedback. */
1783 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1784 &varying_matches::xfb_comparator);
1785 }
1786
1787 unsigned generic_location = 0;
1788 unsigned generic_patch_location = MAX_VARYING*4;
1789 bool previous_var_xfb_only = false;
1790 unsigned previous_packing_class = ~0u;
1791
1792 /* For transform feedback separate mode, we know the number of attributes
1793 * is <= the number of buffers. So packing isn't critical. In fact,
1794 * packing vec3 attributes can cause trouble because splitting a vec3
1795 * effectively creates an additional transform feedback output. The
1796 * extra TFB output may exceed device driver limits.
1797 */
1798 const bool dont_pack_vec3 =
1799 (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1800 prog->TransformFeedback.NumVarying > 0);
1801
1802 for (unsigned i = 0; i < this->num_matches; i++) {
1803 unsigned *location = &generic_location;
1804 const ir_variable *var;
1805 const glsl_type *type;
1806 bool is_vertex_input = false;
1807
1808 if (matches[i].consumer_var) {
1809 var = matches[i].consumer_var;
1810 type = get_varying_type(var, consumer_stage);
1811 if (consumer_stage == MESA_SHADER_VERTEX)
1812 is_vertex_input = true;
1813 } else {
1814 var = matches[i].producer_var;
1815 type = get_varying_type(var, producer_stage);
1816 }
1817
1818 if (var->data.patch)
1819 location = &generic_patch_location;
1820
1821 /* Advance to the next slot if this varying has a different packing
1822 * class than the previous one, and we're not already on a slot
1823 * boundary.
1824 *
1825 * Also advance to the next slot if packing is disabled. This makes sure
1826 * we don't assign two varyings the same location, which is possible
1827 * because we still pack individual arrays, records and matrices even
1828 * when packing is disabled. Note we don't advance to the next slot if
1829 * we can pack together varyings that are only used for transform
1830 * feedback.
1831 */
1832 if (var->data.must_be_shader_input ||
1833 (this->disable_varying_packing &&
1834 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1835 (previous_packing_class != this->matches[i].packing_class) ||
1836 (this->matches[i].packing_order == PACKING_ORDER_VEC3 &&
1837 dont_pack_vec3)) {
1838 *location = ALIGN(*location, 4);
1839 }
1840
1841 previous_var_xfb_only = var->data.is_xfb_only;
1842 previous_packing_class = this->matches[i].packing_class;
1843
1844 /* The number of components taken up by this variable. For vertex shader
1845 * inputs, we use the number of slots * 4, as they have different
1846 * counting rules.
1847 */
1848 unsigned num_components = is_vertex_input ?
1849 type->count_attribute_slots(is_vertex_input) * 4 :
1850 this->matches[i].num_components;
1851
1852 /* The last slot for this variable, inclusive. */
1853 unsigned slot_end = *location + num_components - 1;
1854
1855 /* FIXME: We could be smarter in the code below and loop back over
1856 * locations that we skipped because we couldn't fit the varying
1857 * between varyings with explicit locations. For now, just let the user
1858 * hit the linking error if we run out of room, and suggest they use
1859 * explicit locations.
1860 */
1861 while (slot_end < MAX_VARYING * 4u) {
1862 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1863 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1864
1865 assert(slots > 0);
1866
1867 if ((reserved_slots & slot_mask) == 0) {
1868 break;
1869 }
1870
1871 *location = ALIGN(*location + 1, 4);
1872 slot_end = *location + num_components - 1;
1873 }
1874
1875 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1876 linker_error(prog, "insufficient contiguous locations available for "
1877 "%s it is possible an array or struct could not be "
1878 "packed between varyings with explicit locations. Try "
1879 "using an explicit location for arrays and structs.",
1880 var->name);
1881 }
1882
1883 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1884 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1885 components[j] = 4;
1886 components[slot_end / 4u] = (slot_end & 3) + 1;
1887 }
1888
1889 this->matches[i].generic_location = *location;
1890
1891 *location = slot_end + 1;
1892 }
1893
1894 return (generic_location + 3) / 4;
1895 }
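/* Illustrative sketch, not part of the upstream source: the slot arithmetic
 * used above, worked through for a hypothetical vec3 packed at component
 * location 6 (slot 1, component 2).
 */
#if 0
static void
slot_math_sketch(void)
{
   const unsigned location = 6;          /* locations are in component units */
   const unsigned num_components = 3;    /* a vec3                           */
   const unsigned slot_end = location + num_components - 1;        /* == 8   */

   /* The varying spans slots 1 and 2, so two bits are set in the mask. */
   const unsigned slots = (slot_end / 4u) - (location / 4u) + 1;    /* == 2   */
   const uint64_t slot_mask = ((1ull << slots) - 1) << (location / 4u);
   assert(slot_mask == 0x6);             /* bits 1 and 2                     */

   /* If either of those slots were reserved by an explicit location, the
    * varying would be bumped to the next slot boundary: ALIGN(6 + 1, 4) == 8.
    */
}
#endif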
1896
1897
1898 /**
1899 * Update the producer and consumer shaders to reflect the locations
1900 * assignments that were made by varying_matches::assign_locations().
1901 */
1902 void
1903 varying_matches::store_locations() const
1904 {
1905 /* Check if the location needs to be packed with lower_packed_varyings() or if
1906 * we can just use ARB_enhanced_layouts packing.
1907 */
1908 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1909 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1910
1911 for (unsigned i = 0; i < this->num_matches; i++) {
1912 ir_variable *producer_var = this->matches[i].producer_var;
1913 ir_variable *consumer_var = this->matches[i].consumer_var;
1914 unsigned generic_location = this->matches[i].generic_location;
1915 unsigned slot = generic_location / 4;
1916 unsigned offset = generic_location % 4;
1917
1918 if (producer_var) {
1919 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1920 producer_var->data.location_frac = offset;
1921 }
1922
1923 if (consumer_var) {
1924 assert(consumer_var->data.location == -1);
1925 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1926 consumer_var->data.location_frac = offset;
1927 }
1928
1929 /* Find locations suitable for native packing via
1930 * ARB_enhanced_layouts.
1931 */
1932 if (producer_var && consumer_var) {
1933 if (enhanced_layouts_enabled) {
1934 const glsl_type *type =
1935 get_varying_type(producer_var, producer_stage);
1936 if (type->is_array() || type->is_matrix() || type->is_struct() ||
1937 type->is_double()) {
1938 unsigned comp_slots = type->component_slots() + offset;
1939 unsigned slots = comp_slots / 4;
1940 if (comp_slots % 4)
1941 slots += 1;
1942
1943 for (unsigned j = 0; j < slots; j++) {
1944 pack_loc[slot + j] = true;
1945 }
1946 } else if (offset + type->vector_elements > 4) {
1947 pack_loc[slot] = true;
1948 pack_loc[slot + 1] = true;
1949 } else {
1950 loc_type[slot][offset] = type;
1951 }
1952 }
1953 }
1954 }
1955
1956 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1957 * suitable.
1958 */
1959 if (enhanced_layouts_enabled) {
1960 for (unsigned i = 0; i < this->num_matches; i++) {
1961 ir_variable *producer_var = this->matches[i].producer_var;
1962 ir_variable *consumer_var = this->matches[i].consumer_var;
1963 unsigned generic_location = this->matches[i].generic_location;
1964 unsigned slot = generic_location / 4;
1965
1966 if (pack_loc[slot] || !producer_var || !consumer_var)
1967 continue;
1968
1969 const glsl_type *type =
1970 get_varying_type(producer_var, producer_stage);
1971 bool type_match = true;
1972 for (unsigned j = 0; j < 4; j++) {
1973 if (loc_type[slot][j]) {
1974 if (type->base_type != loc_type[slot][j]->base_type)
1975 type_match = false;
1976 }
1977 }
1978
1979 if (type_match) {
1980 producer_var->data.explicit_location = 1;
1981 consumer_var->data.explicit_location = 1;
1982 producer_var->data.explicit_component = 1;
1983 consumer_var->data.explicit_component = 1;
1984 }
1985 }
1986 }
1987 }
1988
1989
1990 /**
1991 * Compute the "packing class" of the given varying. This is an unsigned
1992 * integer with the property that two variables in the same packing class can
1993 * be safely packed into the same vec4.
1994 */
1995 unsigned
1996 varying_matches::compute_packing_class(const ir_variable *var)
1997 {
1998 /* Without help from the back-end, there is no way to pack together
1999 * variables with different interpolation types, because
2000 * lower_packed_varyings must choose exactly one interpolation type for
2001 * each packed varying it creates.
2002 *
2003 * However, we can safely pack together floats, ints, and uints, because:
2004 *
2005 * - varyings of base type "int" and "uint" must use the "flat"
2006 * interpolation type, which can only occur in GLSL 1.30 and above.
2007 *
2008 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
2009 * can store flat floats as ints without losing any information (using
2010 * the ir_unop_bitcast_* opcodes).
2011 *
2012 * Therefore, the packing class depends only on the interpolation type.
2013 */
2014 const unsigned interp = var->is_interpolation_flat()
2015 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
2016
2017 assert(interp < (1 << 3));
2018
2019 const unsigned packing_class = (interp << 0) |
2020 (var->data.centroid << 3) |
2021 (var->data.sample << 4) |
2022 (var->data.patch << 5) |
2023 (var->data.must_be_shader_input << 6);
2024
2025 return packing_class;
2026 }
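/* Illustrative sketch, not part of the upstream source: how the packing class
 * bits compose for a hypothetical "centroid in float v" input with smooth
 * interpolation.
 */
#if 0
static void
packing_class_sketch(void)
{
   const unsigned interp = INTERP_MODE_SMOOTH;   /* bits [2:0] */
   const unsigned centroid = 1;                  /* bit 3      */
   const unsigned sample = 0;                    /* bit 4      */
   const unsigned patch = 0;                     /* bit 5      */
   const unsigned must_be_shader_input = 0;      /* bit 6      */

   const unsigned packing_class = (interp << 0) |
                                  (centroid << 3) |
                                  (sample << 4) |
                                  (patch << 5) |
                                  (must_be_shader_input << 6);

   /* Two varyings may only share a vec4 if all of these fields match, i.e.
    * if their packing_class values are identical.
    */
   (void) packing_class;
}
#endif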
2027
2028
2029 /**
2030 * Compute the "packing order" of the given varying. This is a sort key we
2031 * use to determine when to attempt to pack the given varying relative to
2032 * other varyings in the same packing class.
2033 */
2034 varying_matches::packing_order_enum
2035 varying_matches::compute_packing_order(const ir_variable *var)
2036 {
2037 const glsl_type *element_type = var->type;
2038
2039 while (element_type->is_array()) {
2040 element_type = element_type->fields.array;
2041 }
2042
2043 switch (element_type->component_slots() % 4) {
2044 case 1: return PACKING_ORDER_SCALAR;
2045 case 2: return PACKING_ORDER_VEC2;
2046 case 3: return PACKING_ORDER_VEC3;
2047 case 0: return PACKING_ORDER_VEC4;
2048 default:
2049 assert(!"Unexpected value of component_slots() % 4");
2050 return PACKING_ORDER_VEC4;
2051 }
2052 }
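/* Illustrative examples (not part of the upstream source) of how the element
 * type of a varying maps to a packing order; only the innermost array element
 * type matters, and only its component count modulo 4:
 *
 *    float f;      -> 1 % 4 == 1 -> PACKING_ORDER_SCALAR
 *    vec2 v2[3];   -> 2 % 4 == 2 -> PACKING_ORDER_VEC2
 *    vec3 v3;      -> 3 % 4 == 3 -> PACKING_ORDER_VEC3
 *    vec4 v4;      -> 4 % 4 == 0 -> PACKING_ORDER_VEC4
 *    mat2 m2;      -> 4 % 4 == 0 -> PACKING_ORDER_VEC4
 */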
2053
2054
2055 /**
2056 * Comparison function passed to qsort() to sort varyings by packing_class and
2057 * then by packing_order.
2058 */
2059 int
2060 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
2061 {
2062 const match *x = (const match *) x_generic;
2063 const match *y = (const match *) y_generic;
2064
2065 if (x->packing_class != y->packing_class)
2066 return x->packing_class - y->packing_class;
2067 return x->packing_order - y->packing_order;
2068 }
2069
2070
2071 /**
2072 * Comparison function passed to qsort() to sort varyings used only by
2073 * transform feedback when packing of other varyings is disabled.
2074 */
2075 int
2076 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
2077 {
2078 const match *x = (const match *) x_generic;
2079
2080 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
2081 return match_comparator(x_generic, y_generic);
2082
2083 /* FIXME: When the comparator returns 0 it means the elements being
2084 * compared are equivalent. However the qsort documentation says:
2085 *
2086 * "The order of equivalent elements is undefined."
2087 *
2088 * In practice the sort ends up reversing the order of the varyings, which
2089 * means locations are also assigned in this reversed order, and that
2090 * happens to be what we want. The same thing happens in
2091 * varying_matches::match_comparator().
2092 */
2093 return 0;
2094 }
2095
2096
2097 /**
2098 * Is the given variable a varying variable to be counted against the
2099 * limit in ctx->Const.MaxVarying?
2100 * This includes variables such as texcoords, colors and generic
2101 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
2102 */
2103 static bool
2104 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
2105 {
2106 /* Only fragment shaders will take a varying variable as an input */
2107 if (stage == MESA_SHADER_FRAGMENT &&
2108 var->data.mode == ir_var_shader_in) {
2109 switch (var->data.location) {
2110 case VARYING_SLOT_POS:
2111 case VARYING_SLOT_FACE:
2112 case VARYING_SLOT_PNTC:
2113 return false;
2114 default:
2115 return true;
2116 }
2117 }
2118 return false;
2119 }
2120
2121
2122 /**
2123 * Visitor class that generates tfeedback_candidate structs describing all
2124 * possible targets of transform feedback.
2125 *
2126 * tfeedback_candidate structs are stored in the hash table
2127 * tfeedback_candidates, which is passed to the constructor. This hash table
2128 * maps varying names to instances of the tfeedback_candidate struct.
2129 */
2130 class tfeedback_candidate_generator : public program_resource_visitor
2131 {
2132 public:
2133 tfeedback_candidate_generator(void *mem_ctx,
2134 hash_table *tfeedback_candidates,
2135 gl_shader_stage stage)
2136 : mem_ctx(mem_ctx),
2137 tfeedback_candidates(tfeedback_candidates),
2138 stage(stage),
2139 toplevel_var(NULL),
2140 varying_floats(0)
2141 {
2142 }
2143
2144 void process(ir_variable *var)
2145 {
2146 /* All named varying interface blocks should be flattened by now */
2147 assert(!var->is_interface_instance());
2148 assert(var->data.mode == ir_var_shader_out);
2149
2150 this->toplevel_var = var;
2151 this->varying_floats = 0;
2152 const glsl_type *t =
2153 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
2154 if (!var->data.patch && stage == MESA_SHADER_TESS_CTRL) {
2155 assert(t->is_array());
2156 t = t->fields.array;
2157 }
2158 program_resource_visitor::process(var, t, false);
2159 }
2160
2161 private:
2162 virtual void visit_field(const glsl_type *type, const char *name,
2163 bool /* row_major */,
2164 const glsl_type * /* record_type */,
2165 const enum glsl_interface_packing,
2166 bool /* last_field */)
2167 {
2168 assert(!type->without_array()->is_struct());
2169 assert(!type->without_array()->is_interface());
2170
2171 tfeedback_candidate *candidate
2172 = rzalloc(this->mem_ctx, tfeedback_candidate);
2173 candidate->toplevel_var = this->toplevel_var;
2174 candidate->type = type;
2175 candidate->offset = this->varying_floats;
2176 _mesa_hash_table_insert(this->tfeedback_candidates,
2177 ralloc_strdup(this->mem_ctx, name),
2178 candidate);
2179 this->varying_floats += type->component_slots();
2180 }
2181
2182 /**
2183 * Memory context used to allocate hash table keys and values.
2184 */
2185 void * const mem_ctx;
2186
2187 /**
2188 * Hash table in which tfeedback_candidate objects should be stored.
2189 */
2190 hash_table * const tfeedback_candidates;
2191
2192 gl_shader_stage stage;
2193
2194 /**
2195 * Pointer to the toplevel variable that is being traversed.
2196 */
2197 ir_variable *toplevel_var;
2198
2199 /**
2200 * Total number of varying floats that have been visited so far. This is
2201 * used to determine the offset to each varying within the toplevel
2202 * variable.
2203 */
2204 unsigned varying_floats;
2205 };
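/* Illustrative sketch, not part of the upstream source: for a hypothetical
 * producer output
 *
 *    struct S { vec4 color; vec2 uv; float fog; };
 *    out S s;
 *
 * the visitor above records one tfeedback_candidate per leaf field, with
 * offsets measured in floats from the start of the toplevel variable:
 *
 *    "s.color" -> offset 0 (4 floats)
 *    "s.uv"    -> offset 4 (2 floats)
 *    "s.fog"   -> offset 6 (1 float)
 */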
2206
2207
2208 namespace linker {
2209
2210 void
2211 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
2212 hash_table *consumer_inputs,
2213 hash_table *consumer_interface_inputs,
2214 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2215 {
2216 memset(consumer_inputs_with_locations,
2217 0,
2218 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2219
2220 foreach_in_list(ir_instruction, node, ir) {
2221 ir_variable *const input_var = node->as_variable();
2222
2223 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2224 /* All interface blocks should have been lowered by this point */
2225 assert(!input_var->type->is_interface());
2226
2227 if (input_var->data.explicit_location) {
2228 /* assign_varying_locations only cares about finding the
2229 * ir_variable at the start of a contiguous location block.
2230 *
2231 * - For !producer, consumer_inputs_with_locations isn't used.
2232 *
2233 * - For !consumer, consumer_inputs_with_locations is empty.
2234 *
2235 * For consumer && producer, if you were trying to set some
2236 * ir_variable to the middle of a location block on the other side
2237 * of producer/consumer, cross_validate_outputs_to_inputs() should
2238 * be link-erroring due to either type mismatch or location
2239 * overlaps. If the variables do match up, then they've got a
2240 * matching data.location and you only looked at
2241 * consumer_inputs_with_locations[var->data.location], not any
2242 * following entries for the array/structure.
2243 */
2244 consumer_inputs_with_locations[input_var->data.location] =
2245 input_var;
2246 } else if (input_var->get_interface_type() != NULL) {
2247 char *const iface_field_name =
2248 ralloc_asprintf(mem_ctx, "%s.%s",
2249 input_var->get_interface_type()->without_array()->name,
2250 input_var->name);
2251 _mesa_hash_table_insert(consumer_interface_inputs,
2252 iface_field_name, input_var);
2253 } else {
2254 _mesa_hash_table_insert(consumer_inputs,
2255 ralloc_strdup(mem_ctx, input_var->name),
2256 input_var);
2257 }
2258 }
2259 }
2260 }
2261
2262 /**
2263 * Find a variable from the consumer that "matches" the specified variable
2264 *
2265 * This function only finds inputs with names that match. There is no
2266 * validation (here) that the types, etc. are compatible.
2267 */
2268 ir_variable *
2269 get_matching_input(void *mem_ctx,
2270 const ir_variable *output_var,
2271 hash_table *consumer_inputs,
2272 hash_table *consumer_interface_inputs,
2273 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2274 {
2275 ir_variable *input_var;
2276
2277 if (output_var->data.explicit_location) {
2278 input_var = consumer_inputs_with_locations[output_var->data.location];
2279 } else if (output_var->get_interface_type() != NULL) {
2280 char *const iface_field_name =
2281 ralloc_asprintf(mem_ctx, "%s.%s",
2282 output_var->get_interface_type()->without_array()->name,
2283 output_var->name);
2284 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2285 input_var = entry ? (ir_variable *) entry->data : NULL;
2286 } else {
2287 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2288 input_var = entry ? (ir_variable *) entry->data : NULL;
2289 }
2290
2291 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2292 ? NULL : input_var;
2293 }
2294
2295 }
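/* Illustrative sketch, not part of the upstream source: the lookup keys used
 * by populate_consumer_input_sets() and get_matching_input() above.  A
 * consumer input that came from an interface block is keyed as
 *
 *    "<interface type name>.<variable name>"     e.g. "Light.dir"
 *
 * (both names here are hypothetical), an ordinary input is keyed by its own
 * name, and an input with an explicit location bypasses the hash tables
 * entirely and is indexed by data.location instead.
 */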
2296
2297 static int
2298 io_variable_cmp(const void *_a, const void *_b)
2299 {
2300 const ir_variable *const a = *(const ir_variable **) _a;
2301 const ir_variable *const b = *(const ir_variable **) _b;
2302
2303 if (a->data.explicit_location && b->data.explicit_location)
2304 return b->data.location - a->data.location;
2305
2306 if (a->data.explicit_location && !b->data.explicit_location)
2307 return 1;
2308
2309 if (!a->data.explicit_location && b->data.explicit_location)
2310 return -1;
2311
2312 return -strcmp(a->name, b->name);
2313 }
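/* Illustrative sketch, not part of the upstream source: given hypothetical
 * outputs "a" and "b" without explicit locations, "c" at location 3 and "d"
 * at location 1, io_variable_cmp() sorts them as
 *
 *    b, a, c, d
 *
 * i.e. reverse-alphabetical names first, then explicit locations in
 * descending order.  canonicalize_shader_io() below then pushes each element
 * onto the head of the IR list, so the canonical order that ends up in the
 * IR is the reverse: d, c, a, b.
 */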
2314
2315 /**
2316 * Sort the shader IO variables into canonical order
2317 */
2318 static void
2319 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2320 {
2321 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2322 unsigned num_variables = 0;
2323
2324 foreach_in_list(ir_instruction, node, ir) {
2325 ir_variable *const var = node->as_variable();
2326
2327 if (var == NULL || var->data.mode != io_mode)
2328 continue;
2329
2330 /* If we have already encountered more I/O variables than could
2331 * possibly link successfully, bail.
2332 */
2333 if (num_variables == ARRAY_SIZE(var_table))
2334 return;
2335
2336 var_table[num_variables++] = var;
2337 }
2338
2339 if (num_variables == 0)
2340 return;
2341
2342 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2343 * we're going to push the variables on to the IR list as a stack, so we
2344 * want the last variable (in canonical order) to be first in the list.
2345 */
2346 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2347
2348 /* Remove the variable from its current location in the IR, and put it at
2349 * the front.
2350 */
2351 for (unsigned i = 0; i < num_variables; i++) {
2352 var_table[i]->remove();
2353 ir->push_head(var_table[i]);
2354 }
2355 }
2356
2357 /**
2358 * Generate a bitfield map of the explicit locations for shader varyings.
2359 *
2360 * Note: For tessellation shaders we are sitting right at the limit of the
2361 * 64-bit map. Per-vertex and per-patch varyings have separate location domains
2362 * with a max of MAX_VARYING.
2363 */
2364 static uint64_t
2365 reserved_varying_slot(struct gl_linked_shader *stage,
2366 ir_variable_mode io_mode)
2367 {
2368 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2369 /* Avoid an overflow of the returned value */
2370 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2371
2372 uint64_t slots = 0;
2373 int var_slot;
2374
2375 if (!stage)
2376 return slots;
2377
2378 foreach_in_list(ir_instruction, node, stage->ir) {
2379 ir_variable *const var = node->as_variable();
2380
2381 if (var == NULL || var->data.mode != io_mode ||
2382 !var->data.explicit_location ||
2383 var->data.location < VARYING_SLOT_VAR0)
2384 continue;
2385
2386 var_slot = var->data.location - VARYING_SLOT_VAR0;
2387
2388 unsigned num_elements = get_varying_type(var, stage->Stage)
2389 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2390 stage->Stage == MESA_SHADER_VERTEX);
2391 for (unsigned i = 0; i < num_elements; i++) {
2392 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2393 slots |= UINT64_C(1) << var_slot;
2394 var_slot += 1;
2395 }
2396 }
2397
2398 return slots;
2399 }
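/* Illustrative sketch, not part of the upstream source: the bitmap built
 * above for a hypothetical producer declaring
 *
 *    layout(location = 2) out vec4 a[3];   // slots 2, 3 and 4
 *    layout(location = 7) out float b;     // slot 7
 */
#if 0
static uint64_t
reserved_slots_sketch(void)
{
   uint64_t slots = 0;
   for (unsigned i = 2; i < 5; i++)
      slots |= UINT64_C(1) << i;     /* vec4 a[3] covers three slots */
   slots |= UINT64_C(1) << 7;        /* float b covers one slot      */
   return slots;                     /* == 0x9c                      */
}
#endif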
2400
2401
2402 /**
2403 * Assign locations for all variables that are produced in one pipeline stage
2404 * (the "producer") and consumed in the next stage (the "consumer").
2405 *
2406 * Variables produced by the producer may also be consumed by transform
2407 * feedback.
2408 *
2409 * \param num_tfeedback_decls is the number of declarations indicating
2410 * variables that may be consumed by transform feedback.
2411 *
2412 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2413 * representing the result of parsing the strings passed to
2414 * glTransformFeedbackVaryings(). assign_location() will be called for
2415 * each of these objects that matches one of the outputs of the
2416 * producer.
2417 *
2418 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2419 * be NULL. In this case, varying locations are assigned solely based on the
2420 * requirements of transform feedback.
2421 */
2422 static bool
2423 assign_varying_locations(struct gl_context *ctx,
2424 void *mem_ctx,
2425 struct gl_shader_program *prog,
2426 gl_linked_shader *producer,
2427 gl_linked_shader *consumer,
2428 unsigned num_tfeedback_decls,
2429 tfeedback_decl *tfeedback_decls,
2430 const uint64_t reserved_slots)
2431 {
2432 /* Tessellation shaders treat inputs and outputs as shared memory and can
2433 * access inputs and outputs of other invocations.
2434 * Therefore, they can't be lowered to temps easily (and definitely not
2435 * efficiently).
2436 */
2437 bool unpackable_tess =
2438 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2439 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2440 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2441
2442 /* Transform feedback code assumes varying arrays are packed, so if the
2443 * driver has disabled varying packing, make sure to at least enable
2444 * packing required by transform feedback.
2445 */
2446 bool xfb_enabled =
2447 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2448
2449 /* Disable packing on outward-facing interfaces for SSO because in ES we
2450 * need to retain the unpacked varying information for draw-time
2451 * validation.
2452 *
2453 * Packing is still enabled on individual arrays, structs, and matrices as
2454 * these are required by the transform feedback code and it is still safe
2455 * to do so. We also enable packing when a varying is only used for
2456 * transform feedback and it is not an SSO.
2457 */
2458 bool disable_varying_packing =
2459 ctx->Const.DisableVaryingPacking || unpackable_tess;
2460 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2461 disable_varying_packing = true;
2462
2463 varying_matches matches(disable_varying_packing, xfb_enabled,
2464 ctx->Extensions.ARB_enhanced_layouts,
2465 producer ? producer->Stage : MESA_SHADER_NONE,
2466 consumer ? consumer->Stage : MESA_SHADER_NONE);
2467 hash_table *tfeedback_candidates =
2468 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2469 _mesa_key_string_equal);
2470 hash_table *consumer_inputs =
2471 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2472 _mesa_key_string_equal);
2473 hash_table *consumer_interface_inputs =
2474 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2475 _mesa_key_string_equal);
2476 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2477 NULL,
2478 };
2479
2480 unsigned consumer_vertices = 0;
2481 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2482 consumer_vertices = prog->Geom.VerticesIn;
2483
2484 /* Operate in a total of four passes.
2485 *
2486 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2487 * that inputs / outputs of separable shaders will be assigned
2488 * predictable locations regardless of the order in which declarations
2489 * appeared in the shader source.
2490 *
2491 * 2. Assign locations for any matching inputs and outputs.
2492 *
2493 * 3. Mark output variables in the producer that do not have locations as
2494 * not being outputs. This lets the optimizer eliminate them.
2495 *
2496 * 4. Mark input variables in the consumer that do not have locations as
2497 * not being inputs. This lets the optimizer eliminate them.
2498 */
2499 if (consumer)
2500 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2501
2502 if (producer)
2503 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2504
2505 if (consumer)
2506 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2507 consumer_inputs,
2508 consumer_interface_inputs,
2509 consumer_inputs_with_locations);
2510
2511 if (producer) {
2512 foreach_in_list(ir_instruction, node, producer->ir) {
2513 ir_variable *const output_var = node->as_variable();
2514
2515 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2516 continue;
2517
2518 /* Only geometry shaders can use non-zero streams */
2519 assert(output_var->data.stream == 0 ||
2520 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2521 producer->Stage == MESA_SHADER_GEOMETRY));
2522
2523 if (num_tfeedback_decls > 0) {
2524 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates, producer->Stage);
2525 /* From OpenGL 4.6 (Core Profile) spec, section 11.1.2.1
2526 * ("Vertex Shader Variables / Output Variables")
2527 *
2528 * "Each program object can specify a set of output variables from
2529 * one shader to be recorded in transform feedback mode (see
2530 * section 13.3). The variables that can be recorded are those
2531 * emitted by the first active shader, in order, from the
2532 * following list:
2533 *
2534 * * geometry shader
2535 * * tessellation evaluation shader
2536 * * tessellation control shader
2537 * * vertex shader"
2538 *
2539 * But on OpenGL ES 3.2, section 11.1.2.1 ("Vertex Shader
2540 * Variables / Output Variables") tessellation control shader is
2541 * not included in the stages list.
2542 */
2543 if (!prog->IsES || producer->Stage != MESA_SHADER_TESS_CTRL) {
2544 g.process(output_var);
2545 }
2546 }
2547
2548 ir_variable *const input_var =
2549 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2550 consumer_interface_inputs,
2551 consumer_inputs_with_locations);
2552
2553 /* If a matching input variable was found, add this output (and the
2554 * input) to the set. If this is a separable program and there is no
2555 * consumer stage, add the output.
2556 *
2557 * Always add TCS outputs. They are shared by all invocations
2558 * within a patch and can be used as shared memory.
2559 */
2560 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2561 producer->Stage == MESA_SHADER_TESS_CTRL) {
2562 matches.record(output_var, input_var);
2563 }
2564
2565 /* Only stream 0 outputs can be consumed in the next stage */
2566 if (input_var && output_var->data.stream != 0) {
2567 linker_error(prog, "output %s is assigned to stream=%d but "
2568 "is linked to an input, which requires stream=0",
2569 output_var->name, output_var->data.stream);
2570 return false;
2571 }
2572 }
2573 } else {
2574 /* If there's no producer stage, then this must be a separable program.
2575 * For example, we may have a program that has just a fragment shader.
2576 * Later this program will be used with some arbitrary vertex (or
2577 * geometry) shader program. This means that locations must be assigned
2578 * for all the inputs.
2579 */
2580 foreach_in_list(ir_instruction, node, consumer->ir) {
2581 ir_variable *const input_var = node->as_variable();
2582 if (input_var && input_var->data.mode == ir_var_shader_in) {
2583 matches.record(NULL, input_var);
2584 }
2585 }
2586 }
2587
2588 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2589 if (!tfeedback_decls[i].is_varying())
2590 continue;
2591
2592 const tfeedback_candidate *matched_candidate
2593 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2594
2595 if (matched_candidate == NULL) {
2596 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2597 return false;
2598 }
2599
2600 /* Mark xfb varyings as always active */
2601 matched_candidate->toplevel_var->data.always_active_io = 1;
2602
2603 /* Mark any corresponding inputs as always active also. We must do this
2604 * because we have a NIR pass that lowers vectors to scalars and another
2605 * that removes unused varyings.
2606 * We don't split varyings marked as always active because there is no
2607 * point in doing so. This means we need to mark both sides of the
2608 * interface as always active; otherwise we will have a mismatch and
2609 * start removing things we shouldn't.
2610 */
2611 ir_variable *const input_var =
2612 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2613 consumer_inputs,
2614 consumer_interface_inputs,
2615 consumer_inputs_with_locations);
2616 if (input_var)
2617 input_var->data.always_active_io = 1;
2618
2619 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2620 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2621 matches.record(matched_candidate->toplevel_var, NULL);
2622 }
2623 }
2624
2625 _mesa_hash_table_destroy(consumer_inputs, NULL);
2626 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2627
2628 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2629 const unsigned slots_used = matches.assign_locations(
2630 prog, components, reserved_slots);
2631 matches.store_locations();
2632
2633 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2634 if (tfeedback_decls[i].is_varying()) {
2635 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2636 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2637 return false;
2638 }
2639 }
2640 }
2641 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2642
2643 if (consumer && producer) {
2644 foreach_in_list(ir_instruction, node, consumer->ir) {
2645 ir_variable *const var = node->as_variable();
2646
2647 if (var && var->data.mode == ir_var_shader_in &&
2648 var->data.is_unmatched_generic_inout) {
2649 if (!prog->IsES && prog->data->Version <= 120) {
2650 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2651 *
2652 * Only those varying variables used (i.e. read) in
2653 * the fragment shader executable must be written to
2654 * by the vertex shader executable; declaring
2655 * superfluous varying variables in a vertex shader is
2656 * permissible.
2657 *
2658 * We interpret this text as meaning that the VS must
2659 * write the variable for the FS to read it. See
2660 * "glsl1-varying read but not written" in piglit.
2661 */
2662 linker_error(prog, "%s shader varying %s not written "
2663 "by %s shader\n.",
2664 _mesa_shader_stage_to_string(consumer->Stage),
2665 var->name,
2666 _mesa_shader_stage_to_string(producer->Stage));
2667 } else {
2668 linker_warning(prog, "%s shader varying %s not written "
2669 "by %s shader\n.",
2670 _mesa_shader_stage_to_string(consumer->Stage),
2671 var->name,
2672 _mesa_shader_stage_to_string(producer->Stage));
2673 }
2674 }
2675 }
2676
2677 /* Now that validation is done it's safe to remove unused varyings. As
2678 * we have both a producer and consumer it's safe to remove unused
2679 * varyings even if the program is an SSO because the stages are being
2680 * linked together, i.e. we have a multi-stage SSO.
2681 */
2682 remove_unused_shader_inputs_and_outputs(false, producer,
2683 ir_var_shader_out);
2684 remove_unused_shader_inputs_and_outputs(false, consumer,
2685 ir_var_shader_in);
2686 }
2687
2688 if (producer) {
2689 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2690 0, producer, disable_varying_packing,
2691 xfb_enabled);
2692 }
2693
2694 if (consumer) {
2695 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2696 consumer_vertices, consumer,
2697 disable_varying_packing, xfb_enabled);
2698 }
2699
2700 return true;
2701 }
2702
2703 static bool
2704 check_against_output_limit(struct gl_context *ctx,
2705 struct gl_shader_program *prog,
2706 gl_linked_shader *producer,
2707 unsigned num_explicit_locations)
2708 {
2709 unsigned output_vectors = num_explicit_locations;
2710
2711 foreach_in_list(ir_instruction, node, producer->ir) {
2712 ir_variable *const var = node->as_variable();
2713
2714 if (var && !var->data.explicit_location &&
2715 var->data.mode == ir_var_shader_out &&
2716 var_counts_against_varying_limit(producer->Stage, var)) {
2717 /* outputs for fragment shader can't be doubles */
2718 output_vectors += var->type->count_attribute_slots(false);
2719 }
2720 }
2721
2722 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2723 unsigned max_output_components =
2724 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2725
2726 const unsigned output_components = output_vectors * 4;
2727 if (output_components > max_output_components) {
2728 if (ctx->API == API_OPENGLES2 || prog->IsES)
2729 linker_error(prog, "%s shader uses too many output vectors "
2730 "(%u > %u)\n",
2731 _mesa_shader_stage_to_string(producer->Stage),
2732 output_vectors,
2733 max_output_components / 4);
2734 else
2735 linker_error(prog, "%s shader uses too many output components "
2736 "(%u > %u)\n",
2737 _mesa_shader_stage_to_string(producer->Stage),
2738 output_components,
2739 max_output_components);
2740
2741 return false;
2742 }
2743
2744 return true;
2745 }
2746
2747 static bool
2748 check_against_input_limit(struct gl_context *ctx,
2749 struct gl_shader_program *prog,
2750 gl_linked_shader *consumer,
2751 unsigned num_explicit_locations)
2752 {
2753 unsigned input_vectors = num_explicit_locations;
2754
2755 foreach_in_list(ir_instruction, node, consumer->ir) {
2756 ir_variable *const var = node->as_variable();
2757
2758 if (var && !var->data.explicit_location &&
2759 var->data.mode == ir_var_shader_in &&
2760 var_counts_against_varying_limit(consumer->Stage, var)) {
2761 /* vertex inputs aren't varying counted */
2762 input_vectors += var->type->count_attribute_slots(false);
2763 }
2764 }
2765
2766 assert(consumer->Stage != MESA_SHADER_VERTEX);
2767 unsigned max_input_components =
2768 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2769
2770 const unsigned input_components = input_vectors * 4;
2771 if (input_components > max_input_components) {
2772 if (ctx->API == API_OPENGLES2 || prog->IsES)
2773 linker_error(prog, "%s shader uses too many input vectors "
2774 "(%u > %u)\n",
2775 _mesa_shader_stage_to_string(consumer->Stage),
2776 input_vectors,
2777 max_input_components / 4);
2778 else
2779 linker_error(prog, "%s shader uses too many input components "
2780 "(%u > %u)\n",
2781 _mesa_shader_stage_to_string(consumer->Stage),
2782 input_components,
2783 max_input_components);
2784
2785 return false;
2786 }
2787
2788 return true;
2789 }
2790
2791 bool
2792 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2793 struct gl_context *ctx, void *mem_ctx)
2794 {
2795 bool has_xfb_qualifiers = false;
2796 unsigned num_tfeedback_decls = 0;
2797 char **varying_names = NULL;
2798 tfeedback_decl *tfeedback_decls = NULL;
2799
2800 /* From the ARB_enhanced_layouts spec:
2801 *
2802 * "If the shader used to record output variables for transform feedback
2803 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2804 * qualifiers, the values specified by TransformFeedbackVaryings are
2805 * ignored, and the set of variables captured for transform feedback is
2806 * instead derived from the specified layout qualifiers."
2807 */
2808 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2809 /* Find last stage before fragment shader */
2810 if (prog->_LinkedShaders[i]) {
2811 has_xfb_qualifiers =
2812 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2813 prog, &num_tfeedback_decls,
2814 &varying_names);
2815 break;
2816 }
2817 }
2818
2819 if (!has_xfb_qualifiers) {
2820 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2821 varying_names = prog->TransformFeedback.VaryingNames;
2822 }
2823
2824 if (num_tfeedback_decls != 0) {
2825 /* From GL_EXT_transform_feedback:
2826 * A program will fail to link if:
2827 *
2828 * * the <count> specified by TransformFeedbackVaryingsEXT is
2829 * non-zero, but the program object has no vertex or geometry
2830 * shader;
2831 */
2832 if (first >= MESA_SHADER_FRAGMENT) {
2833 linker_error(prog, "Transform feedback varyings specified, but "
2834 "no vertex, tessellation, or geometry shader is "
2835 "present.\n");
2836 return false;
2837 }
2838
2839 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2840 num_tfeedback_decls);
2841 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2842 varying_names, tfeedback_decls))
2843 return false;
2844 }
2845
2846 /* If there is no fragment shader we need to set up transform feedback here.
2847 *
2848 * For SSO we also need to assign output locations. We assign them here
2849 * because we need to do it for both single-stage programs and multi-stage
2850 * programs.
2851 */
2852 if (last < MESA_SHADER_FRAGMENT &&
2853 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2854 const uint64_t reserved_out_slots =
2855 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2856 if (!assign_varying_locations(ctx, mem_ctx, prog,
2857 prog->_LinkedShaders[last], NULL,
2858 num_tfeedback_decls, tfeedback_decls,
2859 reserved_out_slots))
2860 return false;
2861 }
2862
2863 if (last <= MESA_SHADER_FRAGMENT) {
2864 /* Remove unused varyings from the first/last stage unless SSO */
2865 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2866 prog->_LinkedShaders[first],
2867 ir_var_shader_in);
2868 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2869 prog->_LinkedShaders[last],
2870 ir_var_shader_out);
2871
2872 /* If the program is made up of only a single stage */
2873 if (first == last) {
2874 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2875
2876 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2877 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2878 tfeedback_decls);
2879
2880 if (prog->SeparateShader) {
2881 const uint64_t reserved_slots =
2882 reserved_varying_slot(sh, ir_var_shader_in);
2883
2884 /* Assign input locations for SSO, output locations are already
2885 * assigned.
2886 */
2887 if (!assign_varying_locations(ctx, mem_ctx, prog,
2888 NULL /* producer */,
2889 sh /* consumer */,
2890 0 /* num_tfeedback_decls */,
2891 NULL /* tfeedback_decls */,
2892 reserved_slots))
2893 return false;
2894 }
2895 } else {
2896 /* Linking the stages in the opposite order (from fragment to vertex)
2897 * ensures that inter-shader outputs written to in an earlier stage
2898 * are eliminated if they are (transitively) not used in a later
2899 * stage.
2900 */
2901 int next = last;
2902 for (int i = next - 1; i >= 0; i--) {
2903 if (prog->_LinkedShaders[i] == NULL && i != 0)
2904 continue;
2905
2906 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2907 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2908
2909 const uint64_t reserved_out_slots =
2910 reserved_varying_slot(sh_i, ir_var_shader_out);
2911 const uint64_t reserved_in_slots =
2912 reserved_varying_slot(sh_next, ir_var_shader_in);
2913
2914 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2915 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2916 tfeedback_decls);
2917
2918 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2919 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2920 tfeedback_decls,
2921 reserved_out_slots | reserved_in_slots))
2922 return false;
2923
2924 /* This must be done after all dead varyings are eliminated. */
2925 if (sh_i != NULL) {
2926 unsigned slots_used = util_bitcount64(reserved_out_slots);
2927 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2928 return false;
2929 }
2930 }
2931
2932 unsigned slots_used = util_bitcount64(reserved_in_slots);
2933 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2934 return false;
2935
2936 next = i;
2937 }
2938 }
2939 }
2940
2941 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2942 has_xfb_qualifiers))
2943 return false;
2944
2945 return true;
2946 }