glsl/linker: simplify xfb_offset vs xfb_stride overflow check
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/errors.h"
33 #include "main/mtypes.h"
34 #include "glsl_symbol_table.h"
35 #include "glsl_parser_extras.h"
36 #include "ir_optimization.h"
37 #include "linker.h"
38 #include "link_varyings.h"
39 #include "main/macros.h"
40 #include "util/hash_table.h"
41 #include "util/u_math.h"
42 #include "program.h"
43
44
45 /**
46 * Get the varying type stripped of the outermost array if we're processing
47 * a stage whose varyings are arrays indexed by a vertex number (such as
48 * geometry shader inputs).
49 */
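/* For example (an illustrative sketch, not code from this file): a vertex
 * shader output declared as `out vec4 color;` appears in a geometry shader
 * as `in vec4 color[];`, and for that geometry-shader input this helper
 * returns vec4 (the element type) rather than the per-vertex array type.
 */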
50 static const glsl_type *
51 get_varying_type(const ir_variable *var, gl_shader_stage stage)
52 {
53 const glsl_type *type = var->type;
54
55 if (!var->data.patch &&
56 ((var->data.mode == ir_var_shader_out &&
57 stage == MESA_SHADER_TESS_CTRL) ||
58 (var->data.mode == ir_var_shader_in &&
59 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
60 stage == MESA_SHADER_GEOMETRY)))) {
61 assert(type->is_array());
62 type = type->fields.array;
63 }
64
65 return type;
66 }
67
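/* A note on create_xfb_varying_names() below: it expands an output that uses
 * xfb qualifiers into the list of names that are later handled as if they had
 * been passed to glTransformFeedbackVaryings(). An illustrative sketch (not a
 * shader from the tree):
 *
 *    layout(xfb_offset = 0) out struct {
 *       vec2 uv;
 *       vec4 color;
 *    } vs_out;
 *
 * yields the captured names "vs_out.uv" and "vs_out.color".
 */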
68 static void
69 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
70 size_t name_length, unsigned *count,
71 const char *ifc_member_name,
72 const glsl_type *ifc_member_t, char ***varying_names)
73 {
74 if (t->is_interface()) {
75 size_t new_length = name_length;
76
77 assert(ifc_member_name && ifc_member_t);
78 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
79
80 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
81 NULL, NULL, varying_names);
82 } else if (t->is_struct()) {
83 for (unsigned i = 0; i < t->length; i++) {
84 const char *field = t->fields.structure[i].name;
85 size_t new_length = name_length;
86
87 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
88
89 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
90 new_length, count, NULL, NULL,
91 varying_names);
92 }
93 } else if (t->without_array()->is_struct() ||
94 t->without_array()->is_interface() ||
95 (t->is_array() && t->fields.array->is_array())) {
96 for (unsigned i = 0; i < t->length; i++) {
97 size_t new_length = name_length;
98
99 /* Append the subscript to the current variable name */
100 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
101
102 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
103 count, ifc_member_name, ifc_member_t,
104 varying_names);
105 }
106 } else {
107 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
108 }
109 }
110
111 static bool
112 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
113 struct gl_shader_program *prog,
114 unsigned *num_tfeedback_decls,
115 char ***varying_names)
116 {
117 bool has_xfb_qualifiers = false;
118
119    /* We still need to enable transform feedback mode even if xfb_stride is
120     * only applied to a global out. Also, since we don't bother to propagate
121     * xfb_stride to interface block members, this catches that case as well.
122     */
123 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
124 if (prog->TransformFeedback.BufferStride[j]) {
125 has_xfb_qualifiers = true;
126 break;
127 }
128 }
129
130 foreach_in_list(ir_instruction, node, sh->ir) {
131 ir_variable *var = node->as_variable();
132 if (!var || var->data.mode != ir_var_shader_out)
133 continue;
134
135 /* From the ARB_enhanced_layouts spec:
136 *
137 * "Any shader making any static use (after preprocessing) of any of
138 * these *xfb_* qualifiers will cause the shader to be in a
139 * transform feedback capturing mode and hence responsible for
140 * describing the transform feedback setup. This mode will capture
141 * any output selected by *xfb_offset*, directly or indirectly, to
142 * a transform feedback buffer."
143 */
144 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
145 has_xfb_qualifiers = true;
146 }
147
148 if (var->data.explicit_xfb_offset) {
149 *num_tfeedback_decls += var->type->varying_count();
150 has_xfb_qualifiers = true;
151 }
152 }
153
154 if (*num_tfeedback_decls == 0)
155 return has_xfb_qualifiers;
156
157 unsigned i = 0;
158 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
159 foreach_in_list(ir_instruction, node, sh->ir) {
160 ir_variable *var = node->as_variable();
161 if (!var || var->data.mode != ir_var_shader_out)
162 continue;
163
164 if (var->data.explicit_xfb_offset) {
165 char *name;
166 const glsl_type *type, *member_type;
167
168 if (var->data.from_named_ifc_block) {
169 type = var->get_interface_type();
170
171 /* Find the member type before it was altered by lowering */
172 const glsl_type *type_wa = type->without_array();
173 member_type =
174 type_wa->fields.structure[type_wa->field_index(var->name)].type;
175 name = ralloc_strdup(NULL, type_wa->name);
176 } else {
177 type = var->type;
178 member_type = NULL;
179 name = ralloc_strdup(NULL, var->name);
180 }
181 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
182 var->name, member_type, varying_names);
183 ralloc_free(name);
184 }
185 }
186
187 assert(i == *num_tfeedback_decls);
188 return has_xfb_qualifiers;
189 }
190
191 /**
192 * Validate the types and qualifiers of an output from one stage against the
193 * matching input to another stage.
194 */
195 static void
196 cross_validate_types_and_qualifiers(struct gl_context *ctx,
197 struct gl_shader_program *prog,
198 const ir_variable *input,
199 const ir_variable *output,
200 gl_shader_stage consumer_stage,
201 gl_shader_stage producer_stage)
202 {
203 /* Check that the types match between stages.
204 */
205 const glsl_type *type_to_match = input->type;
206
207 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
208 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
209 consumer_stage != MESA_SHADER_FRAGMENT) ||
210 consumer_stage == MESA_SHADER_GEOMETRY;
211 if (extra_array_level) {
212 assert(type_to_match->is_array());
213 type_to_match = type_to_match->fields.array;
214 }
215
216 if (type_to_match != output->type) {
217       /* There is a bit of a special case for gl_TexCoord. This
218        * built-in is unsized by default. Applications that access it
219        * with a variable index must redeclare it with a size. There is
220        * some language in the GLSL spec that implies the fragment shader
221        * and vertex shader do not have to agree on this size. Other
222        * drivers behave this way, and one or two applications seem to
223        * rely on it.
224 *
225 * Neither declaration needs to be modified here because the array
226 * sizes are fixed later when update_array_sizes is called.
227 *
228 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
229 *
230 * "Unlike user-defined varying variables, the built-in
231 * varying variables don't have a strict one-to-one
232 * correspondence between the vertex language and the
233 * fragment language."
234 */
235 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
236 linker_error(prog,
237 "%s shader output `%s' declared as type `%s', "
238 "but %s shader input declared as type `%s'\n",
239 _mesa_shader_stage_to_string(producer_stage),
240 output->name,
241 output->type->name,
242 _mesa_shader_stage_to_string(consumer_stage),
243 input->type->name);
244 return;
245 }
246 }
247
248 /* Check that all of the qualifiers match between stages.
249 */
250
251    /* According to the OpenGL and OpenGL ES GLSL specs, the centroid qualifier
252     * is required to match prior to OpenGL 4.3 and OpenGL ES 3.1. The OpenGL ES
253     * 3.0 conformance test suite does not verify that the qualifiers match.
254     * The dEQP test suite expects the opposite (OpenGL ES 3.1) behavior for
255     * OpenGL ES 3.0 drivers, so we relax the checking in all cases.
256     */
257 if (false /* always skip the centroid check */ &&
258 prog->data->Version < (prog->IsES ? 310 : 430) &&
259 input->data.centroid != output->data.centroid) {
260 linker_error(prog,
261 "%s shader output `%s' %s centroid qualifier, "
262 "but %s shader input %s centroid qualifier\n",
263 _mesa_shader_stage_to_string(producer_stage),
264 output->name,
265 (output->data.centroid) ? "has" : "lacks",
266 _mesa_shader_stage_to_string(consumer_stage),
267 (input->data.centroid) ? "has" : "lacks");
268 return;
269 }
270
271 if (input->data.sample != output->data.sample) {
272 linker_error(prog,
273 "%s shader output `%s' %s sample qualifier, "
274 "but %s shader input %s sample qualifier\n",
275 _mesa_shader_stage_to_string(producer_stage),
276 output->name,
277 (output->data.sample) ? "has" : "lacks",
278 _mesa_shader_stage_to_string(consumer_stage),
279 (input->data.sample) ? "has" : "lacks");
280 return;
281 }
282
283 if (input->data.patch != output->data.patch) {
284 linker_error(prog,
285 "%s shader output `%s' %s patch qualifier, "
286 "but %s shader input %s patch qualifier\n",
287 _mesa_shader_stage_to_string(producer_stage),
288 output->name,
289 (output->data.patch) ? "has" : "lacks",
290 _mesa_shader_stage_to_string(consumer_stage),
291 (input->data.patch) ? "has" : "lacks");
292 return;
293 }
294
295 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
296 *
297 * "As only outputs need be declared with invariant, an output from
298 * one shader stage will still match an input of a subsequent stage
299 * without the input being declared as invariant."
300 *
301 * while GLSL 4.20 says:
302 *
303 * "For variables leaving one shader and coming into another shader,
304 * the invariant keyword has to be used in both shaders, or a link
305 * error will result."
306 *
307 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
308 *
309 * "The invariance of varyings that are declared in both the vertex
310 * and fragment shaders must match."
311 */
312 if (input->data.invariant != output->data.invariant &&
313 prog->data->Version < (prog->IsES ? 300 : 430)) {
314 linker_error(prog,
315 "%s shader output `%s' %s invariant qualifier, "
316 "but %s shader input %s invariant qualifier\n",
317 _mesa_shader_stage_to_string(producer_stage),
318 output->name,
319 (output->data.invariant) ? "has" : "lacks",
320 _mesa_shader_stage_to_string(consumer_stage),
321 (input->data.invariant) ? "has" : "lacks");
322 return;
323 }
324
325 /* GLSL >= 4.40 removes text requiring interpolation qualifiers
326 * to match cross stage, they must only match within the same stage.
327 *
328 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
329 *
330 * "It is a link-time error if, within the same stage, the interpolation
331    *     qualifiers of variables of the same name do not match."
332 *
333 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
334 *
335 * "When no interpolation qualifier is present, smooth interpolation
336 * is used."
337 *
338 * So we match variables where one is smooth and the other has no explicit
339 * qualifier.
340 */
341 unsigned input_interpolation = input->data.interpolation;
342 unsigned output_interpolation = output->data.interpolation;
343 if (prog->IsES) {
344 if (input_interpolation == INTERP_MODE_NONE)
345 input_interpolation = INTERP_MODE_SMOOTH;
346 if (output_interpolation == INTERP_MODE_NONE)
347 output_interpolation = INTERP_MODE_SMOOTH;
348 }
349 if (input_interpolation != output_interpolation &&
350 prog->data->Version < 440) {
351 if (!ctx->Const.AllowGLSLCrossStageInterpolationMismatch) {
352 linker_error(prog,
353 "%s shader output `%s' specifies %s "
354 "interpolation qualifier, "
355 "but %s shader input specifies %s "
356 "interpolation qualifier\n",
357 _mesa_shader_stage_to_string(producer_stage),
358 output->name,
359 interpolation_string(output->data.interpolation),
360 _mesa_shader_stage_to_string(consumer_stage),
361 interpolation_string(input->data.interpolation));
362 return;
363 } else {
364 linker_warning(prog,
365 "%s shader output `%s' specifies %s "
366 "interpolation qualifier, "
367 "but %s shader input specifies %s "
368 "interpolation qualifier\n",
369 _mesa_shader_stage_to_string(producer_stage),
370 output->name,
371 interpolation_string(output->data.interpolation),
372 _mesa_shader_stage_to_string(consumer_stage),
373 interpolation_string(input->data.interpolation));
374 }
375 }
376 }
377
378 /**
379 * Validate front and back color outputs against single color input
380 */
381 static void
382 cross_validate_front_and_back_color(struct gl_context *ctx,
383 struct gl_shader_program *prog,
384 const ir_variable *input,
385 const ir_variable *front_color,
386 const ir_variable *back_color,
387 gl_shader_stage consumer_stage,
388 gl_shader_stage producer_stage)
389 {
390 if (front_color != NULL && front_color->data.assigned)
391 cross_validate_types_and_qualifiers(ctx, prog, input, front_color,
392 consumer_stage, producer_stage);
393
394 if (back_color != NULL && back_color->data.assigned)
395 cross_validate_types_and_qualifiers(ctx, prog, input, back_color,
396 consumer_stage, producer_stage);
397 }
398
399 static unsigned
400 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
401 {
402 unsigned location_start = VARYING_SLOT_VAR0;
403
404 switch (stage) {
405 case MESA_SHADER_VERTEX:
406 if (var->data.mode == ir_var_shader_in)
407 location_start = VERT_ATTRIB_GENERIC0;
408 break;
409 case MESA_SHADER_TESS_CTRL:
410 case MESA_SHADER_TESS_EVAL:
411 if (var->data.patch)
412 location_start = VARYING_SLOT_PATCH0;
413 break;
414 case MESA_SHADER_FRAGMENT:
415 if (var->data.mode == ir_var_shader_out)
416 location_start = FRAG_RESULT_DATA0;
417 break;
418 default:
419 break;
420 }
421
422 return var->data.location - location_start;
423 }
424
425 struct explicit_location_info {
426 ir_variable *var;
427 unsigned numerical_type;
428 unsigned interpolation;
429 bool centroid;
430 bool sample;
431 bool patch;
432 };
433
434 static inline unsigned
435 get_numerical_type(const glsl_type *type)
436 {
437 /* From the OpenGL 4.6 spec, section 4.4.1 Input Layout Qualifiers, Page 68,
438 * (Location aliasing):
439 *
440 * "Further, when location aliasing, the aliases sharing the location
441 * must have the same underlying numerical type (floating-point or
442    *     integer)."
443 */
444 if (type->is_float() || type->is_double())
445 return GLSL_TYPE_FLOAT;
446 return GLSL_TYPE_INT;
447 }
448
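/* An illustrative sketch (not from the spec) of the component aliasing that
 * check_location_aliasing() below is meant to accept:
 *
 *    layout(location = 0, component = 0) out vec2 a;   // components 0..1
 *    layout(location = 0, component = 2) out vec2 b;   // components 2..3
 *
 * Both outputs share location 0 but use disjoint components, have the same
 * underlying numerical type (float) and the same interpolation and auxiliary
 * qualifiers, so no link error is reported. Overlapping components, or
 * mismatched types/qualifiers, are rejected.
 */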
449 static bool
450 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
451 ir_variable *var,
452 unsigned location,
453 unsigned component,
454 unsigned location_limit,
455 const glsl_type *type,
456 unsigned interpolation,
457 bool centroid,
458 bool sample,
459 bool patch,
460 gl_shader_program *prog,
461 gl_shader_stage stage)
462 {
463 unsigned last_comp;
464 if (type->without_array()->is_struct()) {
465 /* The component qualifier can't be used on structs so just treat
466 * all component slots as used.
467 */
468 last_comp = 4;
469 } else {
470 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
471 last_comp = component + type->without_array()->vector_elements * dmul;
472 }
473
474 while (location < location_limit) {
475 unsigned comp = 0;
476 while (comp < 4) {
477 struct explicit_location_info *info =
478 &explicit_locations[location][comp];
479
480 if (info->var) {
481             /* Component aliasing is not allowed */
482 if (comp >= component && comp < last_comp) {
483 linker_error(prog,
484 "%s shader has multiple %sputs explicitly "
485 "assigned to location %d and component %d\n",
486 _mesa_shader_stage_to_string(stage),
487 var->data.mode == ir_var_shader_in ? "in" : "out",
488 location, comp);
489 return false;
490 } else {
491 /* For all other used components we need to have matching
492 * types, interpolation and auxiliary storage
493 */
494 if (info->numerical_type !=
495 get_numerical_type(type->without_array())) {
496 linker_error(prog,
497 "Varyings sharing the same location must "
498 "have the same underlying numerical type. "
499 "Location %u component %u\n",
500 location, comp);
501 return false;
502 }
503
504 if (info->interpolation != interpolation) {
505 linker_error(prog,
506 "%s shader has multiple %sputs at explicit "
507 "location %u with different interpolation "
508 "settings\n",
509 _mesa_shader_stage_to_string(stage),
510 var->data.mode == ir_var_shader_in ?
511 "in" : "out", location);
512 return false;
513 }
514
515 if (info->centroid != centroid ||
516 info->sample != sample ||
517 info->patch != patch) {
518 linker_error(prog,
519 "%s shader has multiple %sputs at explicit "
520 "location %u with different aux storage\n",
521 _mesa_shader_stage_to_string(stage),
522 var->data.mode == ir_var_shader_in ?
523 "in" : "out", location);
524 return false;
525 }
526 }
527 } else if (comp >= component && comp < last_comp) {
528 info->var = var;
529 info->numerical_type = get_numerical_type(type->without_array());
530 info->interpolation = interpolation;
531 info->centroid = centroid;
532 info->sample = sample;
533 info->patch = patch;
534 }
535
536 comp++;
537
538 /* We need to do some special handling for doubles as dvec3 and
539 * dvec4 consume two consecutive locations. We don't need to
540 * worry about components beginning at anything other than 0 as
541 * the spec does not allow this for dvec3 and dvec4.
542 */
543 if (comp == 4 && last_comp > 4) {
544 last_comp = last_comp - 4;
545 /* Bump location index and reset the component index */
546 location++;
547 comp = 0;
548 component = 0;
549 }
550 }
551
552 location++;
553 }
554
555 return true;
556 }
557
558 static bool
559 validate_explicit_variable_location(struct gl_context *ctx,
560 struct explicit_location_info explicit_locations[][4],
561 ir_variable *var,
562 gl_shader_program *prog,
563 gl_linked_shader *sh)
564 {
565 const glsl_type *type = get_varying_type(var, sh->Stage);
566 unsigned num_elements = type->count_attribute_slots(false);
567 unsigned idx = compute_variable_location_slot(var, sh->Stage);
568 unsigned slot_limit = idx + num_elements;
569
570 /* Vertex shader inputs and fragment shader outputs are validated in
571 * assign_attribute_or_color_locations() so we should not attempt to
572 * validate them again here.
573 */
574 unsigned slot_max;
575 if (var->data.mode == ir_var_shader_out) {
576 assert(sh->Stage != MESA_SHADER_FRAGMENT);
577 slot_max =
578 ctx->Const.Program[sh->Stage].MaxOutputComponents / 4;
579 } else {
580 assert(var->data.mode == ir_var_shader_in);
581 assert(sh->Stage != MESA_SHADER_VERTEX);
582 slot_max =
583 ctx->Const.Program[sh->Stage].MaxInputComponents / 4;
584 }
585
586 if (slot_limit > slot_max) {
587 linker_error(prog,
588 "Invalid location %u in %s shader\n",
589 idx, _mesa_shader_stage_to_string(sh->Stage));
590 return false;
591 }
592
593 const glsl_type *type_without_array = type->without_array();
594 if (type_without_array->is_interface()) {
595 for (unsigned i = 0; i < type_without_array->length; i++) {
596 glsl_struct_field *field = &type_without_array->fields.structure[i];
597 unsigned field_location = field->location -
598 (field->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
599 if (!check_location_aliasing(explicit_locations, var,
600 field_location,
601 0, field_location + 1,
602 field->type,
603 field->interpolation,
604 field->centroid,
605 field->sample,
606 field->patch,
607 prog, sh->Stage)) {
608 return false;
609 }
610 }
611 } else if (!check_location_aliasing(explicit_locations, var,
612 idx, var->data.location_frac,
613 slot_limit, type,
614 var->data.interpolation,
615 var->data.centroid,
616 var->data.sample,
617 var->data.patch,
618 prog, sh->Stage)) {
619 return false;
620 }
621
622 return true;
623 }
624
625 /**
626 * Validate explicit locations for the inputs to the first stage and the
627 * outputs of the last stage in an SSO program (everything in between is
628 * validated in cross_validate_outputs_to_inputs).
629 */
630 void
631 validate_sso_explicit_locations(struct gl_context *ctx,
632 struct gl_shader_program *prog,
633 gl_shader_stage first_stage,
634 gl_shader_stage last_stage)
635 {
636 assert(prog->SeparateShader);
637
638 /* VS inputs and FS outputs are validated in
639 * assign_attribute_or_color_locations()
640 */
641 bool validate_first_stage = first_stage != MESA_SHADER_VERTEX;
642 bool validate_last_stage = last_stage != MESA_SHADER_FRAGMENT;
643 if (!validate_first_stage && !validate_last_stage)
644 return;
645
646 struct explicit_location_info explicit_locations[MAX_VARYING][4];
647
648 gl_shader_stage stages[2] = { first_stage, last_stage };
649 bool validate_stage[2] = { validate_first_stage, validate_last_stage };
650 ir_variable_mode var_direction[2] = { ir_var_shader_in, ir_var_shader_out };
651
652 for (unsigned i = 0; i < 2; i++) {
653 if (!validate_stage[i])
654 continue;
655
656 gl_shader_stage stage = stages[i];
657
658 gl_linked_shader *sh = prog->_LinkedShaders[stage];
659 assert(sh);
660
661 memset(explicit_locations, 0, sizeof(explicit_locations));
662
663 foreach_in_list(ir_instruction, node, sh->ir) {
664 ir_variable *const var = node->as_variable();
665
666 if (var == NULL ||
667 !var->data.explicit_location ||
668 var->data.location < VARYING_SLOT_VAR0 ||
669 var->data.mode != var_direction[i])
670 continue;
671
672 if (!validate_explicit_variable_location(
673 ctx, explicit_locations, var, prog, sh)) {
674 return;
675 }
676 }
677 }
678 }
679
680 /**
681 * Validate that outputs from one stage match inputs of another
682 */
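/* When explicit locations are used, the producer/consumer pairing is done by
 * location rather than by name. An illustrative sketch (hypothetical shaders,
 * not from the tree):
 *
 *    // producer (e.g. vertex shader)
 *    layout(location = 2) out vec4 a;
 *
 *    // consumer (e.g. fragment shader)
 *    layout(location = 2) in vec4 b;
 *
 * These match even though the names differ, and the usual name-based matching
 * in the loop below is skipped for them.
 */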
683 void
684 cross_validate_outputs_to_inputs(struct gl_context *ctx,
685 struct gl_shader_program *prog,
686 gl_linked_shader *producer,
687 gl_linked_shader *consumer)
688 {
689 glsl_symbol_table parameters;
690 struct explicit_location_info output_explicit_locations[MAX_VARYING][4] = { 0 };
691 struct explicit_location_info input_explicit_locations[MAX_VARYING][4] = { 0 };
692
693 /* Find all shader outputs in the "producer" stage.
694 */
695 foreach_in_list(ir_instruction, node, producer->ir) {
696 ir_variable *const var = node->as_variable();
697
698 if (var == NULL || var->data.mode != ir_var_shader_out)
699 continue;
700
701 if (!var->data.explicit_location
702 || var->data.location < VARYING_SLOT_VAR0)
703 parameters.add_variable(var);
704 else {
705 /* User-defined varyings with explicit locations are handled
706 * differently because they do not need to have matching names.
707 */
708 if (!validate_explicit_variable_location(ctx,
709 output_explicit_locations,
710 var, prog, producer)) {
711 return;
712 }
713 }
714 }
715
716
717 /* Find all shader inputs in the "consumer" stage. Any variables that have
718 * matching outputs already in the symbol table must have the same type and
719 * qualifiers.
720 *
721 * Exception: if the consumer is the geometry shader, then the inputs
722 * should be arrays and the type of the array element should match the type
723 * of the corresponding producer output.
724 */
725 foreach_in_list(ir_instruction, node, consumer->ir) {
726 ir_variable *const input = node->as_variable();
727
728 if (input == NULL || input->data.mode != ir_var_shader_in)
729 continue;
730
731 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
732 const ir_variable *const front_color =
733 parameters.get_variable("gl_FrontColor");
734
735 const ir_variable *const back_color =
736 parameters.get_variable("gl_BackColor");
737
738 cross_validate_front_and_back_color(ctx, prog, input,
739 front_color, back_color,
740 consumer->Stage, producer->Stage);
741 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
742 const ir_variable *const front_color =
743 parameters.get_variable("gl_FrontSecondaryColor");
744
745 const ir_variable *const back_color =
746 parameters.get_variable("gl_BackSecondaryColor");
747
748 cross_validate_front_and_back_color(ctx, prog, input,
749 front_color, back_color,
750 consumer->Stage, producer->Stage);
751 } else {
752 /* The rules for connecting inputs and outputs change in the presence
753 * of explicit locations. In this case, we no longer care about the
754 * names of the variables. Instead, we care only about the
755 * explicitly assigned location.
756 */
757 ir_variable *output = NULL;
758 if (input->data.explicit_location
759 && input->data.location >= VARYING_SLOT_VAR0) {
760
761 const glsl_type *type = get_varying_type(input, consumer->Stage);
762 unsigned num_elements = type->count_attribute_slots(false);
763 unsigned idx =
764 compute_variable_location_slot(input, consumer->Stage);
765 unsigned slot_limit = idx + num_elements;
766
767 if (!validate_explicit_variable_location(ctx,
768 input_explicit_locations,
769 input, prog, consumer)) {
770 return;
771 }
772
773 while (idx < slot_limit) {
774 if (idx >= MAX_VARYING) {
775 linker_error(prog,
776 "Invalid location %u in %s shader\n", idx,
777 _mesa_shader_stage_to_string(consumer->Stage));
778 return;
779 }
780
781 output = output_explicit_locations[idx][input->data.location_frac].var;
782
783 if (output == NULL) {
784 /* A linker failure should only happen when there is no
785 * output declaration and there is Static Use of the
786 * declared input.
787 */
788 if (input->data.used) {
789 linker_error(prog,
790 "%s shader input `%s' with explicit location "
791 "has no matching output\n",
792 _mesa_shader_stage_to_string(consumer->Stage),
793 input->name);
794 break;
795 }
796 } else if (input->data.location != output->data.location) {
797 linker_error(prog,
798 "%s shader input `%s' with explicit location "
799 "has no matching output\n",
800 _mesa_shader_stage_to_string(consumer->Stage),
801 input->name);
802 break;
803 }
804 idx++;
805 }
806 } else {
807 output = parameters.get_variable(input->name);
808 }
809
810 if (output != NULL) {
811 /* Interface blocks have their own validation elsewhere so don't
812 * try validating them here.
813 */
814 if (!(input->get_interface_type() &&
815 output->get_interface_type()))
816 cross_validate_types_and_qualifiers(ctx, prog, input, output,
817 consumer->Stage,
818 producer->Stage);
819 } else {
820             /* Check for input vars with unmatched output vars in the previous
821              * stage, taking into account that interface blocks could have a
822              * matching output but with a different name, so we ignore them.
823              */
824 assert(!input->data.assigned);
825 if (input->data.used && !input->get_interface_type() &&
826 !input->data.explicit_location && !prog->SeparateShader)
827 linker_error(prog,
828 "%s shader input `%s' "
829 "has no matching output in the previous stage\n",
830 _mesa_shader_stage_to_string(consumer->Stage),
831 input->name);
832 }
833 }
834 }
835 }
836
837 /**
838 * Demote shader inputs and outputs that are not used in other stages, and
839 * remove them via dead code elimination.
840 */
841 static void
842 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
843 gl_linked_shader *sh,
844 enum ir_variable_mode mode)
845 {
846 if (is_separate_shader_object)
847 return;
848
849 foreach_in_list(ir_instruction, node, sh->ir) {
850 ir_variable *const var = node->as_variable();
851
852 if (var == NULL || var->data.mode != int(mode))
853 continue;
854
855 /* A shader 'in' or 'out' variable is only really an input or output if
856 * its value is used by other shader stages. This will cause the
857 * variable to have a location assigned.
858 */
859 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
860 assert(var->data.mode != ir_var_temporary);
861
862 /* Assign zeros to demoted inputs to allow more optimizations. */
863 if (var->data.mode == ir_var_shader_in && !var->constant_value)
864 var->constant_value = ir_constant::zero(var, var->type);
865
866 var->data.mode = ir_var_auto;
867 }
868 }
869
870 /* Eliminate code that is now dead due to unused inputs/outputs being
871 * demoted.
872 */
873 while (do_dead_code(sh->ir, false))
874 ;
875
876 }
877
878 /**
879 * Initialize this object based on a string that was passed to
880 * glTransformFeedbackVaryings.
881 *
882  * If the input is malformed, this call still succeeds, but it sets
883  * this->var_name to the malformed input, so tfeedback_decl::find_output_var()
884  * will fail to find any matching variable.
885 */
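/* Examples of strings this parser accepts (an illustrative sketch):
 * "gl_NextBuffer" and "gl_SkipComponents2" (with ARB_transform_feedback3),
 * "color", and "colors[1]"; the last sets var_name to "colors",
 * is_subscripted to true and array_subscript to 1.
 */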
886 void
887 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
888 const char *input)
889 {
890 /* We don't have to be pedantic about what is a valid GLSL variable name,
891 * because any variable with an invalid name can't exist in the IR anyway.
892 */
893
894 this->location = -1;
895 this->orig_name = input;
896 this->lowered_builtin_array_variable = none;
897 this->skip_components = 0;
898 this->next_buffer_separator = false;
899 this->matched_candidate = NULL;
900 this->stream_id = 0;
901 this->buffer = 0;
902 this->offset = 0;
903
904 if (ctx->Extensions.ARB_transform_feedback3) {
905 /* Parse gl_NextBuffer. */
906 if (strcmp(input, "gl_NextBuffer") == 0) {
907 this->next_buffer_separator = true;
908 return;
909 }
910
911 /* Parse gl_SkipComponents. */
912 if (strcmp(input, "gl_SkipComponents1") == 0)
913 this->skip_components = 1;
914 else if (strcmp(input, "gl_SkipComponents2") == 0)
915 this->skip_components = 2;
916 else if (strcmp(input, "gl_SkipComponents3") == 0)
917 this->skip_components = 3;
918 else if (strcmp(input, "gl_SkipComponents4") == 0)
919 this->skip_components = 4;
920
921 if (this->skip_components)
922 return;
923 }
924
925 /* Parse a declaration. */
926 const char *base_name_end;
927 long subscript = parse_program_resource_name(input, &base_name_end);
928 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
929 if (this->var_name == NULL) {
930 _mesa_error_no_memory(__func__);
931 return;
932 }
933
934 if (subscript >= 0) {
935 this->array_subscript = subscript;
936 this->is_subscripted = true;
937 } else {
938 this->is_subscripted = false;
939 }
940
941 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
942 * class must behave specially to account for the fact that gl_ClipDistance
943 * is converted from a float[8] to a vec4[2].
944 */
945 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
946 strcmp(this->var_name, "gl_ClipDistance") == 0) {
947 this->lowered_builtin_array_variable = clip_distance;
948 }
949 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
950 strcmp(this->var_name, "gl_CullDistance") == 0) {
951 this->lowered_builtin_array_variable = cull_distance;
952 }
953
954 if (ctx->Const.LowerTessLevel &&
955 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
956 this->lowered_builtin_array_variable = tess_level_outer;
957 if (ctx->Const.LowerTessLevel &&
958 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
959 this->lowered_builtin_array_variable = tess_level_inner;
960 }
961
962
963 /**
964 * Determine whether two tfeedback_decl objects refer to the same variable and
965 * array index (if applicable).
966 */
967 bool
968 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
969 {
970 assert(x.is_varying() && y.is_varying());
971
972 if (strcmp(x.var_name, y.var_name) != 0)
973 return false;
974 if (x.is_subscripted != y.is_subscripted)
975 return false;
976 if (x.is_subscripted && x.array_subscript != y.array_subscript)
977 return false;
978 return true;
979 }
980
981
982 /**
983 * Assign a location and stream ID for this tfeedback_decl object based on the
984 * transform feedback candidate found by find_candidate.
985 *
986 * If an error occurs, the error is reported through linker_error() and false
987 * is returned.
988 */
989 bool
990 tfeedback_decl::assign_location(struct gl_context *ctx,
991 struct gl_shader_program *prog)
992 {
993 assert(this->is_varying());
994
995 unsigned fine_location
996 = this->matched_candidate->toplevel_var->data.location * 4
997 + this->matched_candidate->toplevel_var->data.location_frac
998 + this->matched_candidate->offset;
999 const unsigned dmul =
1000 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
1001
1002 if (this->matched_candidate->type->is_array()) {
1003 /* Array variable */
1004 const unsigned matrix_cols =
1005 this->matched_candidate->type->fields.array->matrix_columns;
1006 const unsigned vector_elements =
1007 this->matched_candidate->type->fields.array->vector_elements;
1008 unsigned actual_array_size;
1009 switch (this->lowered_builtin_array_variable) {
1010 case clip_distance:
1011 actual_array_size = prog->last_vert_prog ?
1012 prog->last_vert_prog->info.clip_distance_array_size : 0;
1013 break;
1014 case cull_distance:
1015 actual_array_size = prog->last_vert_prog ?
1016 prog->last_vert_prog->info.cull_distance_array_size : 0;
1017 break;
1018 case tess_level_outer:
1019 actual_array_size = 4;
1020 break;
1021 case tess_level_inner:
1022 actual_array_size = 2;
1023 break;
1024 case none:
1025 default:
1026 actual_array_size = this->matched_candidate->type->array_size();
1027 break;
1028 }
1029
1030 if (this->is_subscripted) {
1031 /* Check array bounds. */
1032 if (this->array_subscript >= actual_array_size) {
1033 linker_error(prog, "Transform feedback varying %s has index "
1034 "%i, but the array size is %u.",
1035 this->orig_name, this->array_subscript,
1036 actual_array_size);
1037 return false;
1038 }
1039 unsigned array_elem_size = this->lowered_builtin_array_variable ?
1040 1 : vector_elements * matrix_cols * dmul;
1041 fine_location += array_elem_size * this->array_subscript;
1042 this->size = 1;
1043 } else {
1044 this->size = actual_array_size;
1045 }
1046 this->vector_elements = vector_elements;
1047 this->matrix_columns = matrix_cols;
1048 if (this->lowered_builtin_array_variable)
1049 this->type = GL_FLOAT;
1050 else
1051 this->type = this->matched_candidate->type->fields.array->gl_type;
1052 } else {
1053 /* Regular variable (scalar, vector, or matrix) */
1054 if (this->is_subscripted) {
1055 linker_error(prog, "Transform feedback varying %s requested, "
1056 "but %s is not an array.",
1057 this->orig_name, this->var_name);
1058 return false;
1059 }
1060 this->size = 1;
1061 this->vector_elements = this->matched_candidate->type->vector_elements;
1062 this->matrix_columns = this->matched_candidate->type->matrix_columns;
1063 this->type = this->matched_candidate->type->gl_type;
1064 }
1065 this->location = fine_location / 4;
1066 this->location_frac = fine_location % 4;
1067
1068 /* From GL_EXT_transform_feedback:
1069 * A program will fail to link if:
1070 *
1071 * * the total number of components to capture in any varying
1072 * variable in <varyings> is greater than the constant
1073 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
1074 * buffer mode is SEPARATE_ATTRIBS_EXT;
1075 */
1076 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1077 this->num_components() >
1078 ctx->Const.MaxTransformFeedbackSeparateComponents) {
1079 linker_error(prog, "Transform feedback varying %s exceeds "
1080 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
1081 this->orig_name);
1082 return false;
1083 }
1084
1085 /* Only transform feedback varyings can be assigned to non-zero streams,
1086 * so assign the stream id here.
1087 */
1088 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
1089
1090 unsigned array_offset = this->array_subscript * 4 * dmul;
1091 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
1092 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
1093 this->offset = this->matched_candidate->toplevel_var->data.offset +
1094 array_offset + struct_offset;
1095
1096 return true;
1097 }
1098
1099
1100 unsigned
1101 tfeedback_decl::get_num_outputs() const
1102 {
1103 if (!this->is_varying()) {
1104 return 0;
1105 }
1106 return (this->num_components() + this->location_frac + 3)/4;
1107 }
1108
1109
1110 /**
1111 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
1112 *
1113 * If an error occurs, the error is reported through linker_error() and false
1114 * is returned.
1115 */
1116 bool
1117 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
1118 struct gl_transform_feedback_info *info,
1119 unsigned buffer, unsigned buffer_index,
1120 const unsigned max_outputs, bool *explicit_stride,
1121 bool has_xfb_qualifiers) const
1122 {
1123 unsigned xfb_offset = 0;
1124 unsigned size = this->size;
1125 /* Handle gl_SkipComponents. */
1126 if (this->skip_components) {
1127 info->Buffers[buffer].Stride += this->skip_components;
1128 size = this->skip_components;
1129 goto store_varying;
1130 }
1131
1132 if (this->next_buffer_separator) {
1133 size = 0;
1134 goto store_varying;
1135 }
1136
1137 if (has_xfb_qualifiers) {
1138 xfb_offset = this->offset / 4;
1139 } else {
1140 xfb_offset = info->Buffers[buffer].Stride;
1141 }
1142 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
1143
1144 {
1145 unsigned location = this->location;
1146 unsigned location_frac = this->location_frac;
1147 unsigned num_components = this->num_components();
1148 while (num_components > 0) {
1149 unsigned output_size = MIN2(num_components, 4 - location_frac);
1150 assert((info->NumOutputs == 0 && max_outputs == 0) ||
1151 info->NumOutputs < max_outputs);
1152
1153 /* From the ARB_enhanced_layouts spec:
1154 *
1155 * "If such a block member or variable is not written during a shader
1156 * invocation, the buffer contents at the assigned offset will be
1157 * undefined. Even if there are no static writes to a variable or
1158 * member that is assigned a transform feedback offset, the space is
1159 * still allocated in the buffer and still affects the stride."
1160 */
1161 if (this->is_varying_written()) {
1162 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
1163 info->Outputs[info->NumOutputs].OutputRegister = location;
1164 info->Outputs[info->NumOutputs].NumComponents = output_size;
1165 info->Outputs[info->NumOutputs].StreamId = stream_id;
1166 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
1167 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
1168 ++info->NumOutputs;
1169 }
1170 info->Buffers[buffer].Stream = this->stream_id;
1171 xfb_offset += output_size;
1172
1173 num_components -= output_size;
1174 location++;
1175 location_frac = 0;
1176 }
1177 }
1178
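   /* An illustrative sketch (hypothetical shader, not a real test case) of a
    * declaration the overflow check below rejects:
    *
    *    layout(xfb_buffer = 0, xfb_stride = 16) out;
    *    layout(xfb_offset = 8) out vec4 v;
    *
    * The captured varying ends at byte offset 24, past the declared stride of
    * 16 bytes, so linking fails with the xfb_offset/xfb_stride error.
    */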
1179 if (explicit_stride && explicit_stride[buffer]) {
1180 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
1181 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1182 "multiple of 8 as its applied to a type that is or "
1183 "contains a double.",
1184 info->Buffers[buffer].Stride * 4);
1185 return false;
1186 }
1187
1188 if (xfb_offset > info->Buffers[buffer].Stride) {
1189 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1190 "buffer (%d)", xfb_offset * 4,
1191 info->Buffers[buffer].Stride * 4, buffer);
1192 return false;
1193 }
1194 } else {
1195 info->Buffers[buffer].Stride = xfb_offset;
1196 }
1197
1198 /* From GL_EXT_transform_feedback:
1199 * A program will fail to link if:
1200 *
1201 * * the total number of components to capture is greater than
1202 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1203 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
1204 *
1205 * From GL_ARB_enhanced_layouts:
1206 *
1207 * "The resulting stride (implicit or explicit) must be less than or
1208 * equal to the implementation-dependent constant
1209 * gl_MaxTransformFeedbackInterleavedComponents."
1210 */
1211 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1212 has_xfb_qualifiers) &&
1213 info->Buffers[buffer].Stride >
1214 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1215 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1216 "limit has been exceeded.");
1217 return false;
1218 }
1219
1220 store_varying:
1221 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1222 this->orig_name);
1223 info->Varyings[info->NumVarying].Type = this->type;
1224 info->Varyings[info->NumVarying].Size = size;
1225 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1226 info->NumVarying++;
1227 info->Buffers[buffer].NumVaryings++;
1228
1229 return true;
1230 }
1231
1232
1233 const tfeedback_candidate *
1234 tfeedback_decl::find_candidate(gl_shader_program *prog,
1235 hash_table *tfeedback_candidates)
1236 {
1237 const char *name = this->var_name;
1238 switch (this->lowered_builtin_array_variable) {
1239 case none:
1240 name = this->var_name;
1241 break;
1242 case clip_distance:
1243 name = "gl_ClipDistanceMESA";
1244 break;
1245 case cull_distance:
1246 name = "gl_CullDistanceMESA";
1247 break;
1248 case tess_level_outer:
1249 name = "gl_TessLevelOuterMESA";
1250 break;
1251 case tess_level_inner:
1252 name = "gl_TessLevelInnerMESA";
1253 break;
1254 }
1255 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1256
1257 this->matched_candidate = entry ?
1258 (const tfeedback_candidate *) entry->data : NULL;
1259
1260 if (!this->matched_candidate) {
1261 /* From GL_EXT_transform_feedback:
1262 * A program will fail to link if:
1263 *
1264 * * any variable name specified in the <varyings> array is not
1265 * declared as an output in the geometry shader (if present) or
1266 * the vertex shader (if no geometry shader is present);
1267 */
1268 linker_error(prog, "Transform feedback varying %s undeclared.",
1269 this->orig_name);
1270 }
1271
1272 return this->matched_candidate;
1273 }
1274
1275
1276 /**
1277 * Parse all the transform feedback declarations that were passed to
1278 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1279 *
1280 * If an error occurs, the error is reported through linker_error() and false
1281 * is returned.
1282 */
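/* An illustrative sketch of the kind of list handled here (hypothetical
 * application code, not from Mesa):
 *
 *    const char *varyings[] = { "pos", "gl_NextBuffer", "colors[1]" };
 *    glTransformFeedbackVaryings(prog, 3, varyings, GL_INTERLEAVED_ATTRIBS);
 *
 * Each string becomes one tfeedback_decl; duplicate references to the same
 * variable and array index are reported as a link error below.
 */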
1283 static bool
1284 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1285 const void *mem_ctx, unsigned num_names,
1286 char **varying_names, tfeedback_decl *decls)
1287 {
1288 for (unsigned i = 0; i < num_names; ++i) {
1289 decls[i].init(ctx, mem_ctx, varying_names[i]);
1290
1291 if (!decls[i].is_varying())
1292 continue;
1293
1294 /* From GL_EXT_transform_feedback:
1295 * A program will fail to link if:
1296 *
1297 * * any two entries in the <varyings> array specify the same varying
1298 * variable;
1299 *
1300 * We interpret this to mean "any two entries in the <varyings> array
1301 * specify the same varying variable and array index", since transform
1302 * feedback of arrays would be useless otherwise.
1303 */
1304 for (unsigned j = 0; j < i; ++j) {
1305 if (decls[j].is_varying()) {
1306 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1307 linker_error(prog, "Transform feedback varying %s specified "
1308 "more than once.", varying_names[i]);
1309 return false;
1310 }
1311 }
1312 }
1313 }
1314 return true;
1315 }
1316
1317
1318 static int
1319 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1320 {
1321 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1322 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1323
1324 if (x->get_buffer() != y->get_buffer())
1325 return x->get_buffer() - y->get_buffer();
1326 return x->get_offset() - y->get_offset();
1327 }
1328
1329 /**
1330 * Store transform feedback location assignments into
1331 * prog->sh.LinkedTransformFeedback based on the data stored in
1332 * tfeedback_decls.
1333 *
1334 * If an error occurs, the error is reported through linker_error() and false
1335 * is returned.
1336 */
1337 static bool
1338 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1339 unsigned num_tfeedback_decls,
1340 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1341 {
1342 if (!prog->last_vert_prog)
1343 return true;
1344
1345 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1346 * tracking the number of buffers doesn't overflow.
1347 */
1348 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1349
1350 bool separate_attribs_mode =
1351 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1352
1353 struct gl_program *xfb_prog = prog->last_vert_prog;
1354 xfb_prog->sh.LinkedTransformFeedback =
1355 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1356
1357    /* The xfb_offset qualifier does not have to be used in increasing order,
1358     * but some drivers expect to receive the list of transform feedback
1359     * declarations in order, so sort it now for convenience.
1360     */
1361 if (has_xfb_qualifiers) {
1362 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1363 cmp_xfb_offset);
1364 }
1365
1366 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1367 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1368 num_tfeedback_decls);
1369
1370 unsigned num_outputs = 0;
1371 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1372 if (tfeedback_decls[i].is_varying_written())
1373 num_outputs += tfeedback_decls[i].get_num_outputs();
1374 }
1375
1376 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1377 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1378 num_outputs);
1379
1380 unsigned num_buffers = 0;
1381 unsigned buffers = 0;
1382
1383 if (!has_xfb_qualifiers && separate_attribs_mode) {
1384 /* GL_SEPARATE_ATTRIBS */
1385 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1386 if (!tfeedback_decls[i].store(ctx, prog,
1387 xfb_prog->sh.LinkedTransformFeedback,
1388 num_buffers, num_buffers, num_outputs,
1389 NULL, has_xfb_qualifiers))
1390 return false;
1391
1392 buffers |= 1 << num_buffers;
1393 num_buffers++;
1394 }
1395 }
1396 else {
1397       /* GL_INTERLEAVED_ATTRIBS */
1398 int buffer_stream_id = -1;
1399 unsigned buffer =
1400 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1401 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1402
1403 /* Apply any xfb_stride global qualifiers */
1404 if (has_xfb_qualifiers) {
1405 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1406 if (prog->TransformFeedback.BufferStride[j]) {
1407 explicit_stride[j] = true;
1408 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1409 prog->TransformFeedback.BufferStride[j] / 4;
1410 }
1411 }
1412 }
1413
1414 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1415 if (has_xfb_qualifiers &&
1416 buffer != tfeedback_decls[i].get_buffer()) {
1417 /* we have moved to the next buffer so reset stream id */
1418 buffer_stream_id = -1;
1419 num_buffers++;
1420 }
1421
1422 if (tfeedback_decls[i].is_next_buffer_separator()) {
1423 if (!tfeedback_decls[i].store(ctx, prog,
1424 xfb_prog->sh.LinkedTransformFeedback,
1425 buffer, num_buffers, num_outputs,
1426 explicit_stride, has_xfb_qualifiers))
1427 return false;
1428 num_buffers++;
1429 buffer_stream_id = -1;
1430 continue;
1431 }
1432
1433 if (has_xfb_qualifiers) {
1434 buffer = tfeedback_decls[i].get_buffer();
1435 } else {
1436 buffer = num_buffers;
1437 }
1438
1439 if (tfeedback_decls[i].is_varying()) {
1440 if (buffer_stream_id == -1) {
1441 /* First varying writing to this buffer: remember its stream */
1442 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1443
1444 /* Only mark a buffer as active when there is a varying
1445 * attached to it. This behaviour is based on a revised version
1446 * of section 13.2.2 of the GL 4.6 spec.
1447 */
1448 buffers |= 1 << buffer;
1449 } else if (buffer_stream_id !=
1450 (int) tfeedback_decls[i].get_stream_id()) {
1451 /* Varying writes to the same buffer from a different stream */
1452 linker_error(prog,
1453 "Transform feedback can't capture varyings belonging "
1454 "to different vertex streams in a single buffer. "
1455 "Varying %s writes to buffer from stream %u, other "
1456 "varyings in the same buffer write from stream %u.",
1457 tfeedback_decls[i].name(),
1458 tfeedback_decls[i].get_stream_id(),
1459 buffer_stream_id);
1460 return false;
1461 }
1462 }
1463
1464 if (!tfeedback_decls[i].store(ctx, prog,
1465 xfb_prog->sh.LinkedTransformFeedback,
1466 buffer, num_buffers, num_outputs,
1467 explicit_stride, has_xfb_qualifiers))
1468 return false;
1469 }
1470 }
1471
1472 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1473
1474 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1475 return true;
1476 }
1477
1478 namespace {
1479
1480 /**
1481 * Data structure recording the relationship between outputs of one shader
1482 * stage (the "producer") and inputs of another (the "consumer").
1483 */
1484 class varying_matches
1485 {
1486 public:
1487 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1488 bool enhanced_layouts_enabled,
1489 gl_shader_stage producer_stage,
1490 gl_shader_stage consumer_stage);
1491 ~varying_matches();
1492 void record(ir_variable *producer_var, ir_variable *consumer_var);
1493 unsigned assign_locations(struct gl_shader_program *prog,
1494 uint8_t components[],
1495 uint64_t reserved_slots);
1496 void store_locations() const;
1497
1498 private:
1499 bool is_varying_packing_safe(const glsl_type *type,
1500 const ir_variable *var) const;
1501
1502 /**
1503 * If true, this driver disables varying packing, so all varyings need to
1504 * be aligned on slot boundaries, and take up a number of slots equal to
1505 * their number of matrix columns times their array size.
1506 *
1507 * Packing may also be disabled because our current packing method is not
1508 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1509 * guaranteed to match across stages.
1510 */
1511 const bool disable_varying_packing;
1512
1513 /**
1514    * If true, this driver has transform feedback enabled. The transform
1515    * feedback code requires at least some packing to be done even when
1516    * varying packing is disabled; fortunately, where transform feedback
1517    * requires packing it is safe to override the disabled setting. See
1518    * is_varying_packing_safe().
1519 */
1520 const bool xfb_enabled;
1521
1522 const bool enhanced_layouts_enabled;
1523
1524 /**
1525 * Enum representing the order in which varyings are packed within a
1526 * packing class.
1527 *
1528 * Currently we pack vec4's first, then vec2's, then scalar values, then
1529 * vec3's. This order ensures that the only vectors that are at risk of
1530 * having to be "double parked" (split between two adjacent varying slots)
1531 * are the vec3's.
1532 */
1533 enum packing_order_enum {
1534 PACKING_ORDER_VEC4,
1535 PACKING_ORDER_VEC2,
1536 PACKING_ORDER_SCALAR,
1537 PACKING_ORDER_VEC3,
1538 };
1539
1540 static unsigned compute_packing_class(const ir_variable *var);
1541 static packing_order_enum compute_packing_order(const ir_variable *var);
1542 static int match_comparator(const void *x_generic, const void *y_generic);
1543 static int xfb_comparator(const void *x_generic, const void *y_generic);
1544
1545 /**
1546 * Structure recording the relationship between a single producer output
1547 * and a single consumer input.
1548 */
1549 struct match {
1550 /**
1551 * Packing class for this varying, computed by compute_packing_class().
1552 */
1553 unsigned packing_class;
1554
1555 /**
1556 * Packing order for this varying, computed by compute_packing_order().
1557 */
1558 packing_order_enum packing_order;
1559 unsigned num_components;
1560
1561 /**
1562 * The output variable in the producer stage.
1563 */
1564 ir_variable *producer_var;
1565
1566 /**
1567 * The input variable in the consumer stage.
1568 */
1569 ir_variable *consumer_var;
1570
1571 /**
1572 * The location which has been assigned for this varying. This is
1573 * expressed in multiples of a float, with the first generic varying
1574 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1575 * value 0.
1576 */
1577 unsigned generic_location;
1578 } *matches;
1579
1580 /**
1581 * The number of elements in the \c matches array that are currently in
1582 * use.
1583 */
1584 unsigned num_matches;
1585
1586 /**
1587 * The number of elements that were set aside for the \c matches array when
1588 * it was allocated.
1589 */
1590 unsigned matches_capacity;
1591
1592 gl_shader_stage producer_stage;
1593 gl_shader_stage consumer_stage;
1594 };
1595
1596 } /* anonymous namespace */
1597
1598 varying_matches::varying_matches(bool disable_varying_packing,
1599 bool xfb_enabled,
1600 bool enhanced_layouts_enabled,
1601 gl_shader_stage producer_stage,
1602 gl_shader_stage consumer_stage)
1603 : disable_varying_packing(disable_varying_packing),
1604 xfb_enabled(xfb_enabled),
1605 enhanced_layouts_enabled(enhanced_layouts_enabled),
1606 producer_stage(producer_stage),
1607 consumer_stage(consumer_stage)
1608 {
1609 /* Note: this initial capacity is rather arbitrarily chosen to be large
1610 * enough for many cases without wasting an unreasonable amount of space.
1611 * varying_matches::record() will resize the array if there are more than
1612 * this number of varyings.
1613 */
1614 this->matches_capacity = 8;
1615 this->matches = (match *)
1616 malloc(sizeof(*this->matches) * this->matches_capacity);
1617 this->num_matches = 0;
1618 }
1619
1620
1621 varying_matches::~varying_matches()
1622 {
1623 free(this->matches);
1624 }
1625
1626
1627 /**
1628 * Packing is always safe on individual arrays, structures, and matrices. It
1629 * is also safe if the varying is only used for transform feedback.
1630 */
1631 bool
1632 varying_matches::is_varying_packing_safe(const glsl_type *type,
1633 const ir_variable *var) const
1634 {
1635 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1636 consumer_stage == MESA_SHADER_TESS_CTRL ||
1637 producer_stage == MESA_SHADER_TESS_CTRL)
1638 return false;
1639
1640 return xfb_enabled && (type->is_array() || type->is_struct() ||
1641 type->is_matrix() || var->data.is_xfb_only);
1642 }
1643
1644
1645 /**
1646 * Record the given producer/consumer variable pair in the list of variables
1647 * that should later be assigned locations.
1648 *
1649 * It is permissible for \c consumer_var to be NULL (this happens if a
1650 * variable is output by the producer and consumed by transform feedback, but
1651 * not consumed by the consumer).
1652 *
1653 * If \c producer_var has already been paired up with a consumer_var, or
1654 * producer_var is part of fixed pipeline functionality (and hence already has
1655 * a location assigned), this function has no effect.
1656 *
1657 * Note: as a side effect this function may change the interpolation type of
1658 * \c producer_var, but only when the change couldn't possibly affect
1659 * rendering.
1660 */
1661 void
1662 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1663 {
1664 assert(producer_var != NULL || consumer_var != NULL);
1665
1666 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1667 producer_var->data.explicit_location)) ||
1668 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1669 consumer_var->data.explicit_location))) {
1670 /* Either a location already exists for this variable (since it is part
1671 * of fixed functionality), or it has already been recorded as part of a
1672 * previous match.
1673 */
1674 return;
1675 }
1676
1677 bool needs_flat_qualifier = consumer_var == NULL &&
1678 (producer_var->type->contains_integer() ||
1679 producer_var->type->contains_double());
1680
1681 if (!disable_varying_packing &&
1682 (needs_flat_qualifier ||
1683 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1684 /* Since this varying is not being consumed by the fragment shader, its
1685        * interpolation qualifier cannot possibly affect rendering.
1686 * Also, this variable is non-flat and is (or contains) an integer
1687 * or a double.
1688 * If the consumer stage is unknown, don't modify the interpolation
1689 * type as it could affect rendering later with separate shaders.
1690 *
1691        * lower_packed_varyings requires all integer varyings to be flat,
1692 * regardless of where they appear. We can trivially satisfy that
1693 * requirement by changing the interpolation type to flat here.
1694 */
1695 if (producer_var) {
1696 producer_var->data.centroid = false;
1697 producer_var->data.sample = false;
1698 producer_var->data.interpolation = INTERP_MODE_FLAT;
1699 }
1700
1701 if (consumer_var) {
1702 consumer_var->data.centroid = false;
1703 consumer_var->data.sample = false;
1704 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1705 }
1706 }
1707
1708 if (this->num_matches == this->matches_capacity) {
1709 this->matches_capacity *= 2;
1710 this->matches = (match *)
1711 realloc(this->matches,
1712 sizeof(*this->matches) * this->matches_capacity);
1713 }
1714
1715 /* We must use the consumer to compute the packing class because in GL4.4+
1716 * there is no guarantee interpolation qualifiers will match across stages.
1717 *
1718 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1719 *
1720 * "The type and presence of interpolation qualifiers of variables with
1721 * the same name declared in all linked shaders for the same cross-stage
1722 * interface must match, otherwise the link command will fail.
1723 *
1724 * When comparing an output from one stage to an input of a subsequent
1725 * stage, the input and output don't match if their interpolation
1726 * qualifiers (or lack thereof) are not the same."
1727 *
1728 * This text was also in at least revision 7 of the 4.40 spec but is no
1729 * longer in revision 9 and not in the 4.50 spec.
1730 */
1731 const ir_variable *const var = (consumer_var != NULL)
1732 ? consumer_var : producer_var;
1733 const gl_shader_stage stage = (consumer_var != NULL)
1734 ? consumer_stage : producer_stage;
1735 const glsl_type *type = get_varying_type(var, stage);
1736
1737 if (producer_var && consumer_var &&
1738 consumer_var->data.must_be_shader_input) {
1739 producer_var->data.must_be_shader_input = 1;
1740 }
1741
1742 this->matches[this->num_matches].packing_class
1743 = this->compute_packing_class(var);
1744 this->matches[this->num_matches].packing_order
1745 = this->compute_packing_order(var);
1746 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1747 var->data.must_be_shader_input) {
1748 unsigned slots = type->count_attribute_slots(false);
1749 this->matches[this->num_matches].num_components = slots * 4;
1750 } else {
1751 this->matches[this->num_matches].num_components
1752 = type->component_slots();
1753 }
1754
1755 this->matches[this->num_matches].producer_var = producer_var;
1756 this->matches[this->num_matches].consumer_var = consumer_var;
1757 this->num_matches++;
1758 if (producer_var)
1759 producer_var->data.is_unmatched_generic_inout = 0;
1760 if (consumer_var)
1761 consumer_var->data.is_unmatched_generic_inout = 0;
1762 }
1763
1764
1765 /**
1766 * Choose locations for all of the variable matches that were previously
1767 * passed to varying_matches::record().
1768 * \param components returns array[slot] of number of components used
1769 * per slot (1, 2, 3 or 4)
1770 * \param reserved_slots bitmask indicating which varying slots are already
1771 * allocated
1772 * \return number of slots (4-element vectors) allocated
1773 */
1774 unsigned
1775 varying_matches::assign_locations(struct gl_shader_program *prog,
1776 uint8_t components[],
1777 uint64_t reserved_slots)
1778 {
1779 /* If packing has been disabled then we cannot safely sort the varyings by
1780 * class as it may mean we are using a version of OpenGL where
1781 * interpolation qualifiers are not guaranteed to be matching across
1782 * shaders, sorting in this case could result in mismatching shader
1783 * interfaces.
1784 * When packing is disabled the sort orders varyings used by transform
1785 * feedback first, but also depends on *undefined behaviour* of qsort to
1786 * reverse the order of the varyings. See: xfb_comparator().
1787 */
1788 if (!this->disable_varying_packing) {
1789 /* Sort varying matches into an order that makes them easy to pack. */
1790 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1791 &varying_matches::match_comparator);
1792 } else {
1793 /* Only sort varyings that are only used by transform feedback. */
1794 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1795 &varying_matches::xfb_comparator);
1796 }
1797
1798 unsigned generic_location = 0;
1799 unsigned generic_patch_location = MAX_VARYING*4;
1800 bool previous_var_xfb_only = false;
1801 unsigned previous_packing_class = ~0u;
1802
1803 /* For transform feedback separate mode, we know the number of attributes
1804 * is <= the number of buffers. So packing isn't critical. In fact,
1805 * packing vec3 attributes can cause trouble because splitting a vec3
1806 * effectively creates an additional transform feedback output. The
1807 * extra TFB output may exceed device driver limits.
1808 */
1809 const bool dont_pack_vec3 =
1810 (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1811 prog->TransformFeedback.NumVarying > 0);
1812
1813 for (unsigned i = 0; i < this->num_matches; i++) {
1814 unsigned *location = &generic_location;
1815 const ir_variable *var;
1816 const glsl_type *type;
1817 bool is_vertex_input = false;
1818
1819 if (matches[i].consumer_var) {
1820 var = matches[i].consumer_var;
1821 type = get_varying_type(var, consumer_stage);
1822 if (consumer_stage == MESA_SHADER_VERTEX)
1823 is_vertex_input = true;
1824 } else {
1825 var = matches[i].producer_var;
1826 type = get_varying_type(var, producer_stage);
1827 }
1828
1829 if (var->data.patch)
1830 location = &generic_patch_location;
1831
1832 /* Advance to the next slot if this varying has a different packing
1833 * class than the previous one, and we're not already on a slot
1834 * boundary.
1835 *
1836 * Also advance to the next slot if packing is disabled. This makes sure
1837 * we don't assign varyings the same locations, which is possible
1838 * because we still pack individual arrays, records and matrices even
1839 * when packing is disabled. Note we don't advance to the next slot if
1840 * we can pack varyings together that are only used for transform
1841 * feedback.
1842 */
1843 if (var->data.must_be_shader_input ||
1844 (this->disable_varying_packing &&
1845 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1846 (previous_packing_class != this->matches[i].packing_class) ||
1847 (this->matches[i].packing_order == PACKING_ORDER_VEC3 &&
1848 dont_pack_vec3)) {
1849 *location = ALIGN(*location, 4);
1850 }
1851
1852 previous_var_xfb_only = var->data.is_xfb_only;
1853 previous_packing_class = this->matches[i].packing_class;
1854
1855 /* The number of components taken up by this variable. For vertex shader
1856 * inputs, we use the number of slots * 4, as they have different
1857 * counting rules.
1858 */
1859 unsigned num_components = is_vertex_input ?
1860 type->count_attribute_slots(is_vertex_input) * 4 :
1861 this->matches[i].num_components;
1862
1863 /* The last slot for this variable, inclusive. */
1864 unsigned slot_end = *location + num_components - 1;
1865
1866 /* FIXME: We could be smarter in the below code and loop back over
1867 * trying to fill any locations that we skipped because we couldn't pack
1868 * the varying between varyings with explicit locations. For now just let the user
1869 * hit the linking error if we run out of room and suggest they use
1870 * explicit locations.
1871 */
1872 while (slot_end < MAX_VARYING * 4u) {
1873 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1874 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1875
1876 assert(slots > 0);
1877
1878 if ((reserved_slots & slot_mask) == 0) {
1879 break;
1880 }
1881
1882 *location = ALIGN(*location + 1, 4);
1883 slot_end = *location + num_components - 1;
1884 }
1885
1886 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1887 linker_error(prog, "insufficient contiguous locations available for "
1888 "%s; it is possible an array or struct could not be "
1889 "packed between varyings with explicit locations. Try "
1890 "using an explicit location for arrays and structs.",
1891 var->name);
1892 }
1893
1894 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1895 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1896 components[j] = 4;
1897 components[slot_end / 4u] = (slot_end & 3) + 1;
1898 }
1899
1900 this->matches[i].generic_location = *location;
1901
1902 *location = slot_end + 1;
1903 }
1904
1905 return (generic_location + 3) / 4;
1906 }
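
/* A rough worked example of assign_locations() (hypothetical values): two
 * varyings in the same packing class, say a vec3 followed by a float, can
 * share one slot. The vec3 gets generic_location 0 (components 0-2), the
 * float gets generic_location 3 (component 3), both map to varying slot 0
 * and components[0] ends up as 4. With packing disabled, each varying
 * instead starts on a 4-component boundary, e.g. locations 0 and 4.
 */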
1907
1908
1909 /**
1910 * Update the producer and consumer shaders to reflect the location
1911 * assignments that were made by varying_matches::assign_locations().
1912 */
1913 void
1914 varying_matches::store_locations() const
1915 {
1916 /* Check if the location needs to be packed with lower_packed_varyings() or if
1917 * we can just use ARB_enhanced_layouts packing.
1918 */
1919 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1920 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1921
1922 for (unsigned i = 0; i < this->num_matches; i++) {
1923 ir_variable *producer_var = this->matches[i].producer_var;
1924 ir_variable *consumer_var = this->matches[i].consumer_var;
1925 unsigned generic_location = this->matches[i].generic_location;
1926 unsigned slot = generic_location / 4;
1927 unsigned offset = generic_location % 4;
1928
1929 if (producer_var) {
1930 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1931 producer_var->data.location_frac = offset;
1932 }
1933
1934 if (consumer_var) {
1935 assert(consumer_var->data.location == -1);
1936 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1937 consumer_var->data.location_frac = offset;
1938 }
1939
1940 /* Find locations suitable for native packing via
1941 * ARB_enhanced_layouts.
1942 */
1943 if (producer_var && consumer_var) {
1944 if (enhanced_layouts_enabled) {
1945 const glsl_type *type =
1946 get_varying_type(producer_var, producer_stage);
1947 if (type->is_array() || type->is_matrix() || type->is_struct() ||
1948 type->is_double()) {
1949 unsigned comp_slots = type->component_slots() + offset;
1950 unsigned slots = comp_slots / 4;
1951 if (comp_slots % 4)
1952 slots += 1;
1953
1954 for (unsigned j = 0; j < slots; j++) {
1955 pack_loc[slot + j] = true;
1956 }
1957 } else if (offset + type->vector_elements > 4) {
1958 pack_loc[slot] = true;
1959 pack_loc[slot + 1] = true;
1960 } else {
1961 loc_type[slot][offset] = type;
1962 }
1963 }
1964 }
1965 }
1966
1967 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1968 * suitable.
1969 */
1970 if (enhanced_layouts_enabled) {
1971 for (unsigned i = 0; i < this->num_matches; i++) {
1972 ir_variable *producer_var = this->matches[i].producer_var;
1973 ir_variable *consumer_var = this->matches[i].consumer_var;
1974 unsigned generic_location = this->matches[i].generic_location;
1975 unsigned slot = generic_location / 4;
1976
1977 if (pack_loc[slot] || !producer_var || !consumer_var)
1978 continue;
1979
1980 const glsl_type *type =
1981 get_varying_type(producer_var, producer_stage);
1982 bool type_match = true;
1983 for (unsigned j = 0; j < 4; j++) {
1984 if (loc_type[slot][j]) {
1985 if (type->base_type != loc_type[slot][j]->base_type)
1986 type_match = false;
1987 }
1988 }
1989
1990 if (type_match) {
1991 producer_var->data.explicit_location = 1;
1992 consumer_var->data.explicit_location = 1;
1993 producer_var->data.explicit_component = 1;
1994 consumer_var->data.explicit_component = 1;
1995 }
1996 }
1997 }
1998 }
1999
2000
2001 /**
2002 * Compute the "packing class" of the given varying. This is an unsigned
2003 * integer with the property that two variables in the same packing class can
2004 * be safely packed into the same vec4.
2005 */
2006 unsigned
2007 varying_matches::compute_packing_class(const ir_variable *var)
2008 {
2009 /* Without help from the back-end, there is no way to pack together
2010 * variables with different interpolation types, because
2011 * lower_packed_varyings must choose exactly one interpolation type for
2012 * each packed varying it creates.
2013 *
2014 * However, we can safely pack together floats, ints, and uints, because:
2015 *
2016 * - varyings of base type "int" and "uint" must use the "flat"
2017 * interpolation type, which can only occur in GLSL 1.30 and above.
2018 *
2019 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
2020 * can store flat floats as ints without losing any information (using
2021 * the ir_unop_bitcast_* opcodes).
2022 *
2023 * Therefore, the packing class depends only on the interpolation type.
2024 */
2025 const unsigned interp = var->is_interpolation_flat()
2026 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
2027
2028 assert(interp < (1 << 3));
2029
2030 const unsigned packing_class = (interp << 0) |
2031 (var->data.centroid << 3) |
2032 (var->data.sample << 4) |
2033 (var->data.patch << 5) |
2034 (var->data.must_be_shader_input << 6);
2035
2036 return packing_class;
2037 }
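
/* Worked example for compute_packing_class() (illustrative): two smooth,
 * non-centroid, non-sample, non-patch varyings produce identical packing
 * classes (same interpolation bits, zeros in bits 3-6) and may therefore
 * share a vec4. Declaring one of them "centroid" sets bit 3 for that
 * varying only, giving it a different packing class and keeping the two
 * out of the same slot.
 */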
2038
2039
2040 /**
2041 * Compute the "packing order" of the given varying. This is a sort key we
2042 * use to determine when to attempt to pack the given varying relative to
2043 * other varyings in the same packing class.
2044 */
2045 varying_matches::packing_order_enum
2046 varying_matches::compute_packing_order(const ir_variable *var)
2047 {
2048 const glsl_type *element_type = var->type;
2049
2050 while (element_type->is_array()) {
2051 element_type = element_type->fields.array;
2052 }
2053
2054 switch (element_type->component_slots() % 4) {
2055 case 1: return PACKING_ORDER_SCALAR;
2056 case 2: return PACKING_ORDER_VEC2;
2057 case 3: return PACKING_ORDER_VEC3;
2058 case 0: return PACKING_ORDER_VEC4;
2059 default:
2060 assert(!"Unexpected value of component_slots() % 4");
2061 return PACKING_ORDER_VEC4;
2062 }
2063 }
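
/* Illustrative mapping for compute_packing_order(): a float element
 * (1 component slot) yields PACKING_ORDER_SCALAR, a vec2 yields
 * PACKING_ORDER_VEC2, a vec3[2] (element vec3, 3 slots) yields
 * PACKING_ORDER_VEC3, and a mat2 (4 component slots, 4 % 4 == 0) yields
 * PACKING_ORDER_VEC4.
 */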
2064
2065
2066 /**
2067 * Comparison function passed to qsort() to sort varyings by packing_class and
2068 * then by packing_order.
2069 */
2070 int
2071 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
2072 {
2073 const match *x = (const match *) x_generic;
2074 const match *y = (const match *) y_generic;
2075
2076 if (x->packing_class != y->packing_class)
2077 return x->packing_class - y->packing_class;
2078 return x->packing_order - y->packing_order;
2079 }
2080
2081
2082 /**
2083 * Comparison function passed to qsort() to sort varyings used only by
2084 * transform feedback when packing of other varyings is disabled.
2085 */
2086 int
2087 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
2088 {
2089 const match *x = (const match *) x_generic;
2090
2091 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
2092 return match_comparator(x_generic, y_generic);
2093
2094 /* FIXME: When the comparator returns 0 it means the elements being
2095 * compared are equivalent. However the qsort documentation says:
2096 *
2097 * "The order of equivalent elements is undefined."
2098 *
2099 * In practice the sort ends up reversing the order of the varyings, which
2100 * means locations are also assigned in this reversed order. That happens
2101 * to be what we want. This is also what's happening in
2102 * varying_matches::match_comparator().
2103 */
2104 return 0;
2105 }
2106
2107
2108 /**
2109 * Is the given variable a varying variable to be counted against the
2110 * limit in ctx->Const.MaxVarying?
2111 * This includes variables such as texcoords, colors and generic
2112 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
2113 */
2114 static bool
2115 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
2116 {
2117 /* Only fragment shaders will take a varying variable as an input */
2118 if (stage == MESA_SHADER_FRAGMENT &&
2119 var->data.mode == ir_var_shader_in) {
2120 switch (var->data.location) {
2121 case VARYING_SLOT_POS:
2122 case VARYING_SLOT_FACE:
2123 case VARYING_SLOT_PNTC:
2124 return false;
2125 default:
2126 return true;
2127 }
2128 }
2129 return false;
2130 }
2131
2132
2133 /**
2134 * Visitor class that generates tfeedback_candidate structs describing all
2135 * possible targets of transform feedback.
2136 *
2137 * tfeedback_candidate structs are stored in the hash table
2138 * tfeedback_candidates, which is passed to the constructor. This hash table
2139 * maps varying names to instances of the tfeedback_candidate struct.
2140 */
2141 class tfeedback_candidate_generator : public program_resource_visitor
2142 {
2143 public:
2144 tfeedback_candidate_generator(void *mem_ctx,
2145 hash_table *tfeedback_candidates,
2146 gl_shader_stage stage)
2147 : mem_ctx(mem_ctx),
2148 tfeedback_candidates(tfeedback_candidates),
2149 stage(stage),
2150 toplevel_var(NULL),
2151 varying_floats(0)
2152 {
2153 }
2154
2155 void process(ir_variable *var)
2156 {
2157 /* All named varying interface blocks should be flattened by now */
2158 assert(!var->is_interface_instance());
2159 assert(var->data.mode == ir_var_shader_out);
2160
2161 this->toplevel_var = var;
2162 this->varying_floats = 0;
2163 const glsl_type *t =
2164 var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
2165 if (!var->data.patch && stage == MESA_SHADER_TESS_CTRL) {
2166 assert(t->is_array());
2167 t = t->fields.array;
2168 }
2169 program_resource_visitor::process(var, t, false);
2170 }
2171
2172 private:
2173 virtual void visit_field(const glsl_type *type, const char *name,
2174 bool /* row_major */,
2175 const glsl_type * /* record_type */,
2176 const enum glsl_interface_packing,
2177 bool /* last_field */)
2178 {
2179 assert(!type->without_array()->is_struct());
2180 assert(!type->without_array()->is_interface());
2181
2182 tfeedback_candidate *candidate
2183 = rzalloc(this->mem_ctx, tfeedback_candidate);
2184 candidate->toplevel_var = this->toplevel_var;
2185 candidate->type = type;
2186 candidate->offset = this->varying_floats;
2187 _mesa_hash_table_insert(this->tfeedback_candidates,
2188 ralloc_strdup(this->mem_ctx, name),
2189 candidate);
2190 this->varying_floats += type->component_slots();
2191 }
2192
2193 /**
2194 * Memory context used to allocate hash table keys and values.
2195 */
2196 void * const mem_ctx;
2197
2198 /**
2199 * Hash table in which tfeedback_candidate objects should be stored.
2200 */
2201 hash_table * const tfeedback_candidates;
2202
2203 gl_shader_stage stage;
2204
2205 /**
2206 * Pointer to the toplevel variable that is being traversed.
2207 */
2208 ir_variable *toplevel_var;
2209
2210 /**
2211 * Total number of varying floats that have been visited so far. This is
2212 * used to determine the offset to each varying within the toplevel
2213 * variable.
2214 */
2215 unsigned varying_floats;
2216 };
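
/* A sketch of the traversal (hypothetical shader output): for
 *
 *    struct S { vec3 a; float b[2]; };
 *    out S s;
 *
 * tfeedback_candidate_generator::process() would roughly record the
 * candidates "s.a" at offset 0 and "s.b" at offset 3, with offsets
 * counted in floats from the start of the toplevel variable.
 */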
2217
2218
2219 namespace linker {
2220
2221 void
2222 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
2223 hash_table *consumer_inputs,
2224 hash_table *consumer_interface_inputs,
2225 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2226 {
2227 memset(consumer_inputs_with_locations,
2228 0,
2229 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2230
2231 foreach_in_list(ir_instruction, node, ir) {
2232 ir_variable *const input_var = node->as_variable();
2233
2234 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2235 /* All interface blocks should have been lowered by this point */
2236 assert(!input_var->type->is_interface());
2237
2238 if (input_var->data.explicit_location) {
2239 /* assign_varying_locations only cares about finding the
2240 * ir_variable at the start of a contiguous location block.
2241 *
2242 * - For !producer, consumer_inputs_with_locations isn't used.
2243 *
2244 * - For !consumer, consumer_inputs_with_locations is empty.
2245 *
2246 * For consumer && producer, if you were trying to set some
2247 * ir_variable to the middle of a location block on the other side
2248 * of producer/consumer, cross_validate_outputs_to_inputs() should
2249 * be link-erroring due to either type mismatch or location
2250 * overlaps. If the variables do match up, then they've got a
2251 * matching data.location and you only looked at
2252 * consumer_inputs_with_locations[var->data.location], not any
2253 * following entries for the array/structure.
2254 */
2255 consumer_inputs_with_locations[input_var->data.location] =
2256 input_var;
2257 } else if (input_var->get_interface_type() != NULL) {
2258 char *const iface_field_name =
2259 ralloc_asprintf(mem_ctx, "%s.%s",
2260 input_var->get_interface_type()->without_array()->name,
2261 input_var->name);
2262 _mesa_hash_table_insert(consumer_interface_inputs,
2263 iface_field_name, input_var);
2264 } else {
2265 _mesa_hash_table_insert(consumer_inputs,
2266 ralloc_strdup(mem_ctx, input_var->name),
2267 input_var);
2268 }
2269 }
2270 }
2271 }
2272
2273 /**
2274 * Find a variable from the consumer that "matches" the specified variable
2275 *
2276 * This function only finds inputs with names that match. There is no
2277 * validation (here) that the types, etc. are compatible.
2278 */
2279 ir_variable *
2280 get_matching_input(void *mem_ctx,
2281 const ir_variable *output_var,
2282 hash_table *consumer_inputs,
2283 hash_table *consumer_interface_inputs,
2284 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2285 {
2286 ir_variable *input_var;
2287
2288 if (output_var->data.explicit_location) {
2289 input_var = consumer_inputs_with_locations[output_var->data.location];
2290 } else if (output_var->get_interface_type() != NULL) {
2291 char *const iface_field_name =
2292 ralloc_asprintf(mem_ctx, "%s.%s",
2293 output_var->get_interface_type()->without_array()->name,
2294 output_var->name);
2295 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2296 input_var = entry ? (ir_variable *) entry->data : NULL;
2297 } else {
2298 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2299 input_var = entry ? (ir_variable *) entry->data : NULL;
2300 }
2301
2302 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2303 ? NULL : input_var;
2304 }
2305
2306 }
2307
2308 static int
2309 io_variable_cmp(const void *_a, const void *_b)
2310 {
2311 const ir_variable *const a = *(const ir_variable **) _a;
2312 const ir_variable *const b = *(const ir_variable **) _b;
2313
2314 if (a->data.explicit_location && b->data.explicit_location)
2315 return b->data.location - a->data.location;
2316
2317 if (a->data.explicit_location && !b->data.explicit_location)
2318 return 1;
2319
2320 if (!a->data.explicit_location && b->data.explicit_location)
2321 return -1;
2322
2323 return -strcmp(a->name, b->name);
2324 }
2325
2326 /**
2327 * Sort the shader IO variables into canonical order
2328 */
2329 static void
2330 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2331 {
2332 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2333 unsigned num_variables = 0;
2334
2335 foreach_in_list(ir_instruction, node, ir) {
2336 ir_variable *const var = node->as_variable();
2337
2338 if (var == NULL || var->data.mode != io_mode)
2339 continue;
2340
2341 /* If we have already encountered more I/O variables than could
2342 * successfully link, bail.
2343 */
2344 if (num_variables == ARRAY_SIZE(var_table))
2345 return;
2346
2347 var_table[num_variables++] = var;
2348 }
2349
2350 if (num_variables == 0)
2351 return;
2352
2353 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2354 * we're going to push the variables onto the IR list as a stack, so we
2355 * want the last variable (in canonical order) to be first in the list.
2356 */
2357 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2358
2359 /* Remove the variable from its current location in the IR, and put it at
2360 * the front.
2361 */
2362 for (unsigned i = 0; i < num_variables; i++) {
2363 var_table[i]->remove();
2364 ir->push_head(var_table[i]);
2365 }
2366 }
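
/* For example (hypothetical declarations), given consumer inputs declared
 * in the order
 *
 *    in vec4 c;
 *    layout(location = 5) in vec4 b;
 *    in vec4 a;
 *
 * canonicalize_shader_io() is expected to leave the IR list starting with
 * b (explicit locations first, by increasing location), then a, then c
 * (the remaining variables by name).
 */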
2367
2368 /**
2369 * Generate a bitfield map of the explicit locations for shader varyings.
2370 *
2371 * Note: For Tessellation shaders we are sitting right on the limits of the
2372 * 64 bit map. Per-vertex and per-patch both have separate location domains
2373 * with a max of MAX_VARYING.
2374 */
2375 static uint64_t
2376 reserved_varying_slot(struct gl_linked_shader *stage,
2377 ir_variable_mode io_mode)
2378 {
2379 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2380 /* Avoid an overflow of the returned value */
2381 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2382
2383 uint64_t slots = 0;
2384 int var_slot;
2385
2386 if (!stage)
2387 return slots;
2388
2389 foreach_in_list(ir_instruction, node, stage->ir) {
2390 ir_variable *const var = node->as_variable();
2391
2392 if (var == NULL || var->data.mode != io_mode ||
2393 !var->data.explicit_location ||
2394 var->data.location < VARYING_SLOT_VAR0)
2395 continue;
2396
2397 var_slot = var->data.location - VARYING_SLOT_VAR0;
2398
2399 unsigned num_elements = get_varying_type(var, stage->Stage)
2400 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2401 stage->Stage == MESA_SHADER_VERTEX);
2402 for (unsigned i = 0; i < num_elements; i++) {
2403 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2404 slots |= UINT64_C(1) << var_slot;
2405 var_slot += 1;
2406 }
2407 }
2408
2409 return slots;
2410 }
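
/* Worked example for reserved_varying_slot() (hypothetical output): a
 * declaration such as
 *
 *    layout(location = 2) out vec4 colors[3];
 *
 * occupies generic slots 2, 3 and 4, so the returned mask would be
 * 0b11100 (0x1c).
 */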
2411
2412
2413 /**
2414 * Assign locations for all variables that are produced in one pipeline stage
2415 * (the "producer") and consumed in the next stage (the "consumer").
2416 *
2417 * Variables produced by the producer may also be consumed by transform
2418 * feedback.
2419 *
2420 * \param num_tfeedback_decls is the number of declarations indicating
2421 * variables that may be consumed by transform feedback.
2422 *
2423 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2424 * representing the result of parsing the strings passed to
2425 * glTransformFeedbackVaryings(). assign_location() will be called for
2426 * each of these objects that matches one of the outputs of the
2427 * producer.
2428 *
2429 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2430 * be NULL. In this case, varying locations are assigned solely based on the
2431 * requirements of transform feedback.
2432 */
2433 static bool
2434 assign_varying_locations(struct gl_context *ctx,
2435 void *mem_ctx,
2436 struct gl_shader_program *prog,
2437 gl_linked_shader *producer,
2438 gl_linked_shader *consumer,
2439 unsigned num_tfeedback_decls,
2440 tfeedback_decl *tfeedback_decls,
2441 const uint64_t reserved_slots)
2442 {
2443 /* Tessellation shaders treat inputs and outputs as shared memory and can
2444 * access inputs and outputs of other invocations.
2445 * Therefore, they can't be lowered to temps easily (and definitely not
2446 * efficiently).
2447 */
2448 bool unpackable_tess =
2449 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2450 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2451 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2452
2453 /* Transform feedback code assumes varying arrays are packed, so if the
2454 * driver has disabled varying packing, make sure to at least enable
2455 * packing required by transform feedback.
2456 */
2457 bool xfb_enabled =
2458 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2459
2460 /* Disable packing on outward facing interfaces for SSO because in ES we
2461 * need to retain the unpacked varying information for draw time
2462 * validation.
2463 *
2464 * Packing is still enabled on individual arrays, structs, and matrices as
2465 * these are required by the transform feedback code and it is still safe
2466 * to do so. We also enable packing when a varying is only used for
2467 * transform feedback and its not a SSO.
2468 */
2469 bool disable_varying_packing =
2470 ctx->Const.DisableVaryingPacking || unpackable_tess;
2471 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2472 disable_varying_packing = true;
2473
2474 varying_matches matches(disable_varying_packing, xfb_enabled,
2475 ctx->Extensions.ARB_enhanced_layouts,
2476 producer ? producer->Stage : MESA_SHADER_NONE,
2477 consumer ? consumer->Stage : MESA_SHADER_NONE);
2478 hash_table *tfeedback_candidates =
2479 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2480 _mesa_key_string_equal);
2481 hash_table *consumer_inputs =
2482 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2483 _mesa_key_string_equal);
2484 hash_table *consumer_interface_inputs =
2485 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2486 _mesa_key_string_equal);
2487 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2488 NULL,
2489 };
2490
2491 unsigned consumer_vertices = 0;
2492 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2493 consumer_vertices = prog->Geom.VerticesIn;
2494
2495 /* Operate in a total of four passes.
2496 *
2497 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2498 * that inputs / outputs of separable shaders will be assigned
2499 * predictable locations regardless of the order in which declarations
2500 * appeared in the shader source.
2501 *
2502 * 2. Assign locations for any matching inputs and outputs.
2503 *
2504 * 3. Mark output variables in the producer that do not have locations as
2505 * not being outputs. This lets the optimizer eliminate them.
2506 *
2507 * 4. Mark input variables in the consumer that do not have locations as
2508 * not being inputs. This lets the optimizer eliminate them.
2509 */
2510 if (consumer)
2511 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2512
2513 if (producer)
2514 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2515
2516 if (consumer)
2517 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2518 consumer_inputs,
2519 consumer_interface_inputs,
2520 consumer_inputs_with_locations);
2521
2522 if (producer) {
2523 foreach_in_list(ir_instruction, node, producer->ir) {
2524 ir_variable *const output_var = node->as_variable();
2525
2526 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2527 continue;
2528
2529 /* Only geometry shaders can use non-zero streams */
2530 assert(output_var->data.stream == 0 ||
2531 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2532 producer->Stage == MESA_SHADER_GEOMETRY));
2533
2534 if (num_tfeedback_decls > 0) {
2535 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates, producer->Stage);
2536 /* From OpenGL 4.6 (Core Profile) spec, section 11.1.2.1
2537 * ("Vertex Shader Variables / Output Variables")
2538 *
2539 * "Each program object can specify a set of output variables from
2540 * one shader to be recorded in transform feedback mode (see
2541 * section 13.3). The variables that can be recorded are those
2542 * emitted by the first active shader, in order, from the
2543 * following list:
2544 *
2545 * * geometry shader
2546 * * tessellation evaluation shader
2547 * * tessellation control shader
2548 * * vertex shader"
2549 *
2550 * But on OpenGL ES 3.2, section 11.1.2.1 ("Vertex Shader
2551 * Variables / Output Variables") tessellation control shader is
2552 * not included in the stages list.
2553 */
2554 if (!prog->IsES || producer->Stage != MESA_SHADER_TESS_CTRL) {
2555 g.process(output_var);
2556 }
2557 }
2558
2559 ir_variable *const input_var =
2560 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2561 consumer_interface_inputs,
2562 consumer_inputs_with_locations);
2563
2564 /* If a matching input variable was found, add this output (and the
2565 * input) to the set. If this is a separable program and there is no
2566 * consumer stage, add the output.
2567 *
2568 * Always add TCS outputs. They are shared by all invocations
2569 * within a patch and can be used as shared memory.
2570 */
2571 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2572 producer->Stage == MESA_SHADER_TESS_CTRL) {
2573 matches.record(output_var, input_var);
2574 }
2575
2576 /* Only stream 0 outputs can be consumed in the next stage */
2577 if (input_var && output_var->data.stream != 0) {
2578 linker_error(prog, "output %s is assigned to stream=%d but "
2579 "is linked to an input, which requires stream=0",
2580 output_var->name, output_var->data.stream);
2581 return false;
2582 }
2583 }
2584 } else {
2585 /* If there's no producer stage, then this must be a separable program.
2586 * For example, we may have a program that has just a fragment shader.
2587 * Later this program will be used with some arbitrary vertex (or
2588 * geometry) shader program. This means that locations must be assigned
2589 * for all the inputs.
2590 */
2591 foreach_in_list(ir_instruction, node, consumer->ir) {
2592 ir_variable *const input_var = node->as_variable();
2593 if (input_var && input_var->data.mode == ir_var_shader_in) {
2594 matches.record(NULL, input_var);
2595 }
2596 }
2597 }
2598
2599 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2600 if (!tfeedback_decls[i].is_varying())
2601 continue;
2602
2603 const tfeedback_candidate *matched_candidate
2604 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2605
2606 if (matched_candidate == NULL) {
2607 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2608 return false;
2609 }
2610
2611 /* Mark xfb varyings as always active */
2612 matched_candidate->toplevel_var->data.always_active_io = 1;
2613
2614 /* Mark any corresponding inputs as always active also. We must do this
2615 * because we have a NIR pass that lowers vectors to scalars and another
2616 * that removes unused varyings.
2617 * We don't split varyings marked as always active because there is no
2618 * point in doing so. This means we need to mark both sides of the
2619 * interface as always active, otherwise we will have a mismatch and
2620 * start removing things we shouldn't.
2621 */
2622 ir_variable *const input_var =
2623 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2624 consumer_inputs,
2625 consumer_interface_inputs,
2626 consumer_inputs_with_locations);
2627 if (input_var)
2628 input_var->data.always_active_io = 1;
2629
2630 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2631 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2632 matches.record(matched_candidate->toplevel_var, NULL);
2633 }
2634 }
2635
2636 _mesa_hash_table_destroy(consumer_inputs, NULL);
2637 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2638
2639 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2640 const unsigned slots_used = matches.assign_locations(
2641 prog, components, reserved_slots);
2642 matches.store_locations();
2643
2644 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2645 if (tfeedback_decls[i].is_varying()) {
2646 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2647 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2648 return false;
2649 }
2650 }
2651 }
2652 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2653
2654 if (consumer && producer) {
2655 foreach_in_list(ir_instruction, node, consumer->ir) {
2656 ir_variable *const var = node->as_variable();
2657
2658 if (var && var->data.mode == ir_var_shader_in &&
2659 var->data.is_unmatched_generic_inout) {
2660 if (!prog->IsES && prog->data->Version <= 120) {
2661 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2662 *
2663 * Only those varying variables used (i.e. read) in
2664 * the fragment shader executable must be written to
2665 * by the vertex shader executable; declaring
2666 * superfluous varying variables in a vertex shader is
2667 * permissible.
2668 *
2669 * We interpret this text as meaning that the VS must
2670 * write the variable for the FS to read it. See
2671 * "glsl1-varying read but not written" in piglit.
2672 */
2673 linker_error(prog, "%s shader varying %s not written "
2674 "by %s shader.\n",
2675 _mesa_shader_stage_to_string(consumer->Stage),
2676 var->name,
2677 _mesa_shader_stage_to_string(producer->Stage));
2678 } else {
2679 linker_warning(prog, "%s shader varying %s not written "
2680 "by %s shader.\n",
2681 _mesa_shader_stage_to_string(consumer->Stage),
2682 var->name,
2683 _mesa_shader_stage_to_string(producer->Stage));
2684 }
2685 }
2686 }
2687
2688 /* Now that validation is done it's safe to remove unused varyings. As
2689 * we have both a producer and consumer it's safe to remove unused
2690 * varyings even if the program is an SSO because the stages are being
2691 * linked together, i.e. we have a multi-stage SSO.
2692 */
2693 remove_unused_shader_inputs_and_outputs(false, producer,
2694 ir_var_shader_out);
2695 remove_unused_shader_inputs_and_outputs(false, consumer,
2696 ir_var_shader_in);
2697 }
2698
2699 if (producer) {
2700 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2701 0, producer, disable_varying_packing,
2702 xfb_enabled);
2703 }
2704
2705 if (consumer) {
2706 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2707 consumer_vertices, consumer,
2708 disable_varying_packing, xfb_enabled);
2709 }
2710
2711 return true;
2712 }
2713
2714 static bool
2715 check_against_output_limit(struct gl_context *ctx,
2716 struct gl_shader_program *prog,
2717 gl_linked_shader *producer,
2718 unsigned num_explicit_locations)
2719 {
2720 unsigned output_vectors = num_explicit_locations;
2721
2722 foreach_in_list(ir_instruction, node, producer->ir) {
2723 ir_variable *const var = node->as_variable();
2724
2725 if (var && !var->data.explicit_location &&
2726 var->data.mode == ir_var_shader_out &&
2727 var_counts_against_varying_limit(producer->Stage, var)) {
2728 /* outputs for fragment shader can't be doubles */
2729 output_vectors += var->type->count_attribute_slots(false);
2730 }
2731 }
2732
2733 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2734 unsigned max_output_components =
2735 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2736
2737 const unsigned output_components = output_vectors * 4;
2738 if (output_components > max_output_components) {
2739 if (ctx->API == API_OPENGLES2 || prog->IsES)
2740 linker_error(prog, "%s shader uses too many output vectors "
2741 "(%u > %u)\n",
2742 _mesa_shader_stage_to_string(producer->Stage),
2743 output_vectors,
2744 max_output_components / 4);
2745 else
2746 linker_error(prog, "%s shader uses too many output components "
2747 "(%u > %u)\n",
2748 _mesa_shader_stage_to_string(producer->Stage),
2749 output_components,
2750 max_output_components);
2751
2752 return false;
2753 }
2754
2755 return true;
2756 }
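
/* Example of check_against_output_limit()'s arithmetic (hypothetical
 * limits): with MaxOutputComponents == 64 and a producer writing 20
 * generic vec4 varyings, output_components is 20 * 4 = 80 > 64, so
 * linking fails; on ES the message is reported in vectors instead
 * (20 > 16).
 */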
2757
2758 static bool
2759 check_against_input_limit(struct gl_context *ctx,
2760 struct gl_shader_program *prog,
2761 gl_linked_shader *consumer,
2762 unsigned num_explicit_locations)
2763 {
2764 unsigned input_vectors = num_explicit_locations;
2765
2766 foreach_in_list(ir_instruction, node, consumer->ir) {
2767 ir_variable *const var = node->as_variable();
2768
2769 if (var && !var->data.explicit_location &&
2770 var->data.mode == ir_var_shader_in &&
2771 var_counts_against_varying_limit(consumer->Stage, var)) {
2772 /* vertex inputs aren't varying counted */
2773 input_vectors += var->type->count_attribute_slots(false);
2774 }
2775 }
2776
2777 assert(consumer->Stage != MESA_SHADER_VERTEX);
2778 unsigned max_input_components =
2779 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2780
2781 const unsigned input_components = input_vectors * 4;
2782 if (input_components > max_input_components) {
2783 if (ctx->API == API_OPENGLES2 || prog->IsES)
2784 linker_error(prog, "%s shader uses too many input vectors "
2785 "(%u > %u)\n",
2786 _mesa_shader_stage_to_string(consumer->Stage),
2787 input_vectors,
2788 max_input_components / 4);
2789 else
2790 linker_error(prog, "%s shader uses too many input components "
2791 "(%u > %u)\n",
2792 _mesa_shader_stage_to_string(consumer->Stage),
2793 input_components,
2794 max_input_components);
2795
2796 return false;
2797 }
2798
2799 return true;
2800 }
2801
2802 bool
2803 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2804 struct gl_context *ctx, void *mem_ctx)
2805 {
2806 bool has_xfb_qualifiers = false;
2807 unsigned num_tfeedback_decls = 0;
2808 char **varying_names = NULL;
2809 tfeedback_decl *tfeedback_decls = NULL;
2810
2811 /* From the ARB_enhanced_layouts spec:
2812 *
2813 * "If the shader used to record output variables for transform feedback
2814 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2815 * qualifiers, the values specified by TransformFeedbackVaryings are
2816 * ignored, and the set of variables captured for transform feedback is
2817 * instead derived from the specified layout qualifiers."
2818 */
2819 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2820 /* Find last stage before fragment shader */
2821 if (prog->_LinkedShaders[i]) {
2822 has_xfb_qualifiers =
2823 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2824 prog, &num_tfeedback_decls,
2825 &varying_names);
2826 break;
2827 }
2828 }
2829
2830 if (!has_xfb_qualifiers) {
2831 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2832 varying_names = prog->TransformFeedback.VaryingNames;
2833 }
2834
2835 if (num_tfeedback_decls != 0) {
2836 /* From GL_EXT_transform_feedback:
2837 * A program will fail to link if:
2838 *
2839 * * the <count> specified by TransformFeedbackVaryingsEXT is
2840 * non-zero, but the program object has no vertex or geometry
2841 * shader;
2842 */
2843 if (first >= MESA_SHADER_FRAGMENT) {
2844 linker_error(prog, "Transform feedback varyings specified, but "
2845 "no vertex, tessellation, or geometry shader is "
2846 "present.\n");
2847 return false;
2848 }
2849
2850 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2851 num_tfeedback_decls);
2852 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2853 varying_names, tfeedback_decls))
2854 return false;
2855 }
2856
2857 /* If there is no fragment shader we still need to set up transform feedback.
2858 *
2859 * For SSO we also need to assign output locations. We assign them here
2860 * because we need to do it for both single stage programs and multi stage
2861 * programs.
2862 */
2863 if (last < MESA_SHADER_FRAGMENT &&
2864 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2865 const uint64_t reserved_out_slots =
2866 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2867 if (!assign_varying_locations(ctx, mem_ctx, prog,
2868 prog->_LinkedShaders[last], NULL,
2869 num_tfeedback_decls, tfeedback_decls,
2870 reserved_out_slots))
2871 return false;
2872 }
2873
2874 if (last <= MESA_SHADER_FRAGMENT) {
2875 /* Remove unused varyings from the first/last stage unless SSO */
2876 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2877 prog->_LinkedShaders[first],
2878 ir_var_shader_in);
2879 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2880 prog->_LinkedShaders[last],
2881 ir_var_shader_out);
2882
2883 /* If the program is made up of only a single stage */
2884 if (first == last) {
2885 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2886
2887 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2888 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2889 tfeedback_decls);
2890
2891 if (prog->SeparateShader) {
2892 const uint64_t reserved_slots =
2893 reserved_varying_slot(sh, ir_var_shader_in);
2894
2895 /* Assign input locations for SSO, output locations are already
2896 * assigned.
2897 */
2898 if (!assign_varying_locations(ctx, mem_ctx, prog,
2899 NULL /* producer */,
2900 sh /* consumer */,
2901 0 /* num_tfeedback_decls */,
2902 NULL /* tfeedback_decls */,
2903 reserved_slots))
2904 return false;
2905 }
2906 } else {
2907 /* Linking the stages in the opposite order (from fragment to vertex)
2908 * ensures that inter-shader outputs written to in an earlier stage
2909 * are eliminated if they are (transitively) not used in a later
2910 * stage.
2911 */
2912 int next = last;
2913 for (int i = next - 1; i >= 0; i--) {
2914 if (prog->_LinkedShaders[i] == NULL && i != 0)
2915 continue;
2916
2917 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2918 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2919
2920 const uint64_t reserved_out_slots =
2921 reserved_varying_slot(sh_i, ir_var_shader_out);
2922 const uint64_t reserved_in_slots =
2923 reserved_varying_slot(sh_next, ir_var_shader_in);
2924
2925 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2926 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2927 tfeedback_decls);
2928
2929 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2930 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2931 tfeedback_decls,
2932 reserved_out_slots | reserved_in_slots))
2933 return false;
2934
2935 /* This must be done after all dead varyings are eliminated. */
2936 if (sh_i != NULL) {
2937 unsigned slots_used = util_bitcount64(reserved_out_slots);
2938 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2939 return false;
2940 }
2941 }
2942
2943 unsigned slots_used = util_bitcount64(reserved_in_slots);
2944 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2945 return false;
2946
2947 next = i;
2948 }
2949 }
2950 }
2951
2952 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2953 has_xfb_qualifiers))
2954 return false;
2955
2956 return true;
2957 }