1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/errors.h"
33 #include "main/mtypes.h"
34 #include "glsl_symbol_table.h"
35 #include "glsl_parser_extras.h"
36 #include "ir_optimization.h"
37 #include "linker.h"
38 #include "link_varyings.h"
39 #include "main/macros.h"
40 #include "util/hash_table.h"
41 #include "program.h"
42
43
44 /**
45 * Get the varying type stripped of the outermost array if we're processing
46 * a stage whose varyings are arrays indexed by a vertex number (such as
47 * geometry shader inputs).
48 */
49 static const glsl_type *
50 get_varying_type(const ir_variable *var, gl_shader_stage stage)
51 {
52 const glsl_type *type = var->type;
53
54 if (!var->data.patch &&
55 ((var->data.mode == ir_var_shader_out &&
56 stage == MESA_SHADER_TESS_CTRL) ||
57 (var->data.mode == ir_var_shader_in &&
58 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
59 stage == MESA_SHADER_GEOMETRY)))) {
60 assert(type->is_array());
61 type = type->fields.array;
62 }
63
64 return type;
65 }
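/* For example (a minimal sketch): a geometry shader input declared in GLSL as
*
*    in vec4 tex_coord[3];   // one element per vertex of the input primitive
*
* has the IR type vec4[3], but get_varying_type() returns vec4, so the rest of
* the varying linking code operates on the per-vertex element type.
*/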
66
67 static void
68 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
69 size_t name_length, unsigned *count,
70 const char *ifc_member_name,
71 const glsl_type *ifc_member_t, char ***varying_names)
72 {
73 if (t->is_interface()) {
74 size_t new_length = name_length;
75
76 assert(ifc_member_name && ifc_member_t);
77 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
78
79 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
80 NULL, NULL, varying_names);
81 } else if (t->is_record()) {
82 for (unsigned i = 0; i < t->length; i++) {
83 const char *field = t->fields.structure[i].name;
84 size_t new_length = name_length;
85
86 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
87
88 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
89 new_length, count, NULL, NULL,
90 varying_names);
91 }
92 } else if (t->without_array()->is_record() ||
93 t->without_array()->is_interface() ||
94 (t->is_array() && t->fields.array->is_array())) {
95 for (unsigned i = 0; i < t->length; i++) {
96 size_t new_length = name_length;
97
98 /* Append the subscript to the current variable name */
99 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
100
101 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
102 count, ifc_member_name, ifc_member_t,
103 varying_names);
104 }
105 } else {
106 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
107 }
108 }
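/* A rough illustration of the recursion above: for a captured output declared
* as
*
*    struct S { float a; vec2 b; };
*    out S s[2];
*
* the generated transform feedback names are "s[0].a", "s[0].b", "s[1].a" and
* "s[1].b", one entry per leaf-level varying.
*/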
109
110 static bool
111 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
112 struct gl_shader_program *prog,
113 unsigned *num_tfeedback_decls,
114 char ***varying_names)
115 {
116 bool has_xfb_qualifiers = false;
117
118 /* We still need to enable transform feedback mode even if xfb_stride is
119 * only applied to a global out. We also don't bother to propagate
120 * xfb_stride to interface block members, so this catches that case as well.
121 */
122 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
123 if (prog->TransformFeedback.BufferStride[j]) {
124 has_xfb_qualifiers = true;
125 break;
126 }
127 }
128
129 foreach_in_list(ir_instruction, node, sh->ir) {
130 ir_variable *var = node->as_variable();
131 if (!var || var->data.mode != ir_var_shader_out)
132 continue;
133
134 /* From the ARB_enhanced_layouts spec:
135 *
136 * "Any shader making any static use (after preprocessing) of any of
137 * these *xfb_* qualifiers will cause the shader to be in a
138 * transform feedback capturing mode and hence responsible for
139 * describing the transform feedback setup. This mode will capture
140 * any output selected by *xfb_offset*, directly or indirectly, to
141 * a transform feedback buffer."
142 */
143 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
144 has_xfb_qualifiers = true;
145 }
146
147 if (var->data.explicit_xfb_offset) {
148 *num_tfeedback_decls += var->type->varying_count();
149 has_xfb_qualifiers = true;
150 }
151 }
152
153 if (*num_tfeedback_decls == 0)
154 return has_xfb_qualifiers;
155
156 unsigned i = 0;
157 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
158 foreach_in_list(ir_instruction, node, sh->ir) {
159 ir_variable *var = node->as_variable();
160 if (!var || var->data.mode != ir_var_shader_out)
161 continue;
162
163 if (var->data.explicit_xfb_offset) {
164 char *name;
165 const glsl_type *type, *member_type;
166
167 if (var->data.from_named_ifc_block) {
168 type = var->get_interface_type();
169
170 /* Find the member type before it was altered by lowering */
171 const glsl_type *type_wa = type->without_array();
172 member_type =
173 type_wa->fields.structure[type_wa->field_index(var->name)].type;
174 name = ralloc_strdup(NULL, type_wa->name);
175 } else {
176 type = var->type;
177 member_type = NULL;
178 name = ralloc_strdup(NULL, var->name);
179 }
180 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
181 var->name, member_type, varying_names);
182 ralloc_free(name);
183 }
184 }
185
186 assert(i == *num_tfeedback_decls);
187 return has_xfb_qualifiers;
188 }
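/* A small GLSL sketch of the qualifiers this function looks for; any of them
* puts the program into transform feedback capturing mode:
*
*    layout(xfb_buffer = 0, xfb_stride = 32) out;             // global stride
*    layout(xfb_offset = 0) out vec4 pos_out;                 // captured
*    layout(xfb_buffer = 1, xfb_offset = 16) out vec4 color;  // captured
*
* Only outputs with an explicit xfb_offset contribute to num_tfeedback_decls;
* xfb_buffer/xfb_stride alone merely set has_xfb_qualifiers.
*/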
189
190 /**
191 * Validate the types and qualifiers of an output from one stage against the
192 * matching input to another stage.
193 */
194 static void
195 cross_validate_types_and_qualifiers(struct gl_context *ctx,
196 struct gl_shader_program *prog,
197 const ir_variable *input,
198 const ir_variable *output,
199 gl_shader_stage consumer_stage,
200 gl_shader_stage producer_stage)
201 {
202 /* Check that the types match between stages.
203 */
204 const glsl_type *type_to_match = input->type;
205
206 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
207 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
208 consumer_stage != MESA_SHADER_FRAGMENT) ||
209 consumer_stage == MESA_SHADER_GEOMETRY;
210 if (extra_array_level) {
211 assert(type_to_match->is_array());
212 type_to_match = type_to_match->fields.array;
213 }
214
215 if (type_to_match != output->type) {
216 /* There is a bit of a special case for gl_TexCoord. This
217 * built-in is unsized by default. Applications that access it
218 * with a variable index must redeclare it with a size. There is
219 * some language in the GLSL spec that implies the fragment shader
220 * and vertex shader do not have to agree on this size. Other
221 * drivers behave this way, and one or two applications seem to
222 * rely on it.
223 *
224 * Neither declaration needs to be modified here because the array
225 * sizes are fixed later when update_array_sizes is called.
226 *
227 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
228 *
229 * "Unlike user-defined varying variables, the built-in
230 * varying variables don't have a strict one-to-one
231 * correspondence between the vertex language and the
232 * fragment language."
233 */
234 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
235 linker_error(prog,
236 "%s shader output `%s' declared as type `%s', "
237 "but %s shader input declared as type `%s'\n",
238 _mesa_shader_stage_to_string(producer_stage),
239 output->name,
240 output->type->name,
241 _mesa_shader_stage_to_string(consumer_stage),
242 input->type->name);
243 return;
244 }
245 }
246
247 /* Check that all of the qualifiers match between stages.
248 */
249
250 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
251 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
252 * conformance test suite does not verify that the qualifiers must match.
253 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
254 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
255 */
256 if (false /* always skip the centroid check */ &&
257 prog->data->Version < (prog->IsES ? 310 : 430) &&
258 input->data.centroid != output->data.centroid) {
259 linker_error(prog,
260 "%s shader output `%s' %s centroid qualifier, "
261 "but %s shader input %s centroid qualifier\n",
262 _mesa_shader_stage_to_string(producer_stage),
263 output->name,
264 (output->data.centroid) ? "has" : "lacks",
265 _mesa_shader_stage_to_string(consumer_stage),
266 (input->data.centroid) ? "has" : "lacks");
267 return;
268 }
269
270 if (input->data.sample != output->data.sample) {
271 linker_error(prog,
272 "%s shader output `%s' %s sample qualifier, "
273 "but %s shader input %s sample qualifier\n",
274 _mesa_shader_stage_to_string(producer_stage),
275 output->name,
276 (output->data.sample) ? "has" : "lacks",
277 _mesa_shader_stage_to_string(consumer_stage),
278 (input->data.sample) ? "has" : "lacks");
279 return;
280 }
281
282 if (input->data.patch != output->data.patch) {
283 linker_error(prog,
284 "%s shader output `%s' %s patch qualifier, "
285 "but %s shader input %s patch qualifier\n",
286 _mesa_shader_stage_to_string(producer_stage),
287 output->name,
288 (output->data.patch) ? "has" : "lacks",
289 _mesa_shader_stage_to_string(consumer_stage),
290 (input->data.patch) ? "has" : "lacks");
291 return;
292 }
293
294 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
295 *
296 * "As only outputs need be declared with invariant, an output from
297 * one shader stage will still match an input of a subsequent stage
298 * without the input being declared as invariant."
299 *
300 * while GLSL 4.20 says:
301 *
302 * "For variables leaving one shader and coming into another shader,
303 * the invariant keyword has to be used in both shaders, or a link
304 * error will result."
305 *
306 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
307 *
308 * "The invariance of varyings that are declared in both the vertex
309 * and fragment shaders must match."
310 */
311 if (input->data.invariant != output->data.invariant &&
312 prog->data->Version < (prog->IsES ? 300 : 430)) {
313 linker_error(prog,
314 "%s shader output `%s' %s invariant qualifier, "
315 "but %s shader input %s invariant qualifier\n",
316 _mesa_shader_stage_to_string(producer_stage),
317 output->name,
318 (output->data.invariant) ? "has" : "lacks",
319 _mesa_shader_stage_to_string(consumer_stage),
320 (input->data.invariant) ? "has" : "lacks");
321 return;
322 }
323
324 /* GLSL >= 4.40 removes the text requiring interpolation qualifiers
325 * to match across stages; they must only match within the same stage.
326 *
327 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
328 *
329 * "It is a link-time error if, within the same stage, the interpolation
330 * qualifiers of variables of the same name do not match."
331 *
332 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
333 *
334 * "When no interpolation qualifier is present, smooth interpolation
335 * is used."
336 *
337 * So we match variables where one is smooth and the other has no explicit
338 * qualifier.
339 */
340 unsigned input_interpolation = input->data.interpolation;
341 unsigned output_interpolation = output->data.interpolation;
342 if (prog->IsES) {
343 if (input_interpolation == INTERP_MODE_NONE)
344 input_interpolation = INTERP_MODE_SMOOTH;
345 if (output_interpolation == INTERP_MODE_NONE)
346 output_interpolation = INTERP_MODE_SMOOTH;
347 }
348 if (input_interpolation != output_interpolation &&
349 prog->data->Version < 440) {
350 if (!ctx->Const.AllowGLSLCrossStageInterpolationMismatch) {
351 linker_error(prog,
352 "%s shader output `%s' specifies %s "
353 "interpolation qualifier, "
354 "but %s shader input specifies %s "
355 "interpolation qualifier\n",
356 _mesa_shader_stage_to_string(producer_stage),
357 output->name,
358 interpolation_string(output->data.interpolation),
359 _mesa_shader_stage_to_string(consumer_stage),
360 interpolation_string(input->data.interpolation));
361 return;
362 } else {
363 linker_warning(prog,
364 "%s shader output `%s' specifies %s "
365 "interpolation qualifier, "
366 "but %s shader input specifies %s "
367 "interpolation qualifier\n",
368 _mesa_shader_stage_to_string(producer_stage),
369 output->name,
370 interpolation_string(output->data.interpolation),
371 _mesa_shader_stage_to_string(consumer_stage),
372 interpolation_string(input->data.interpolation));
373 }
374 }
375 }
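/* A minimal sketch of the extra_array_level case handled above: a vertex
* shader output consumed by a geometry shader is matched against the element
* type of the geometry shader's per-vertex input array.
*
*    // vertex shader            // geometry shader
*    out vec2 uv;                in vec2 uv[];
*
* Here type_to_match is stripped from vec2[] to vec2 before being compared
* with the producer's vec2.
*/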
376
377 /**
378 * Validate front and back color outputs against single color input
379 */
380 static void
381 cross_validate_front_and_back_color(struct gl_context *ctx,
382 struct gl_shader_program *prog,
383 const ir_variable *input,
384 const ir_variable *front_color,
385 const ir_variable *back_color,
386 gl_shader_stage consumer_stage,
387 gl_shader_stage producer_stage)
388 {
389 if (front_color != NULL && front_color->data.assigned)
390 cross_validate_types_and_qualifiers(ctx, prog, input, front_color,
391 consumer_stage, producer_stage);
392
393 if (back_color != NULL && back_color->data.assigned)
394 cross_validate_types_and_qualifiers(ctx, prog, input, back_color,
395 consumer_stage, producer_stage);
396 }
397
398 static unsigned
399 compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
400 {
401 unsigned location_start = VARYING_SLOT_VAR0;
402
403 switch (stage) {
404 case MESA_SHADER_VERTEX:
405 if (var->data.mode == ir_var_shader_in)
406 location_start = VERT_ATTRIB_GENERIC0;
407 break;
408 case MESA_SHADER_TESS_CTRL:
409 case MESA_SHADER_TESS_EVAL:
410 if (var->data.patch)
411 location_start = VARYING_SLOT_PATCH0;
412 break;
413 case MESA_SHADER_FRAGMENT:
414 if (var->data.mode == ir_var_shader_out)
415 location_start = FRAG_RESULT_DATA0;
416 break;
417 default:
418 break;
419 }
420
421 return var->data.location - location_start;
422 }
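/* A worked example, assuming the front end has already translated the GLSL
* "layout(location = N)" qualifier into a slot enum: a per-patch tessellation
* control output at location 2 is stored as VARYING_SLOT_PATCH0 + 2 and maps
* to slot index 2 here, while a regular varying at location 2 is stored as
* VARYING_SLOT_VAR0 + 2 and also maps to index 2, but within the generic
* varying range.
*/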
423
424 struct explicit_location_info {
425 ir_variable *var;
426 unsigned numerical_type;
427 unsigned interpolation;
428 bool centroid;
429 bool sample;
430 bool patch;
431 };
432
433 static inline unsigned
434 get_numerical_type(const glsl_type *type)
435 {
436 /* From the OpenGL 4.6 spec, section 4.4.1 Input Layout Qualifiers, Page 68,
437 * (Location aliasing):
438 *
439 * "Further, when location aliasing, the aliases sharing the location
440 * must have the same underlying numerical type (floating-point or
441 * integer)
442 */
443 if (type->is_float() || type->is_double())
444 return GLSL_TYPE_FLOAT;
445 return GLSL_TYPE_INT;
446 }
447
448 static bool
449 check_location_aliasing(struct explicit_location_info explicit_locations[][4],
450 ir_variable *var,
451 unsigned location,
452 unsigned component,
453 unsigned location_limit,
454 const glsl_type *type,
455 unsigned interpolation,
456 bool centroid,
457 bool sample,
458 bool patch,
459 gl_shader_program *prog,
460 gl_shader_stage stage)
461 {
462 unsigned last_comp;
463 if (type->without_array()->is_record()) {
464 /* The component qualifier can't be used on structs so just treat
465 * all component slots as used.
466 */
467 last_comp = 4;
468 } else {
469 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
470 last_comp = component + type->without_array()->vector_elements * dmul;
471 }
472
473 while (location < location_limit) {
474 unsigned comp = 0;
475 while (comp < 4) {
476 struct explicit_location_info *info =
477 &explicit_locations[location][comp];
478
479 if (info->var) {
480 /* Component aliasing is not allowed */
481 if (comp >= component && comp < last_comp) {
482 linker_error(prog,
483 "%s shader has multiple outputs explicitly "
484 "assigned to location %d and component %d\n",
485 _mesa_shader_stage_to_string(stage),
486 location, comp);
487 return false;
488 } else {
489 /* For all other used components we need to have matching
490 * types, interpolation and auxiliary storage
491 */
492 if (info->numerical_type !=
493 get_numerical_type(type->without_array())) {
494 linker_error(prog,
495 "Varyings sharing the same location must "
496 "have the same underlying numerical type. "
497 "Location %u component %u\n",
498 location, comp);
499 return false;
500 }
501
502 if (info->interpolation != interpolation) {
503 linker_error(prog,
504 "%s shader has multiple outputs at explicit "
505 "location %u with different interpolation "
506 "settings\n",
507 _mesa_shader_stage_to_string(stage), location);
508 return false;
509 }
510
511 if (info->centroid != centroid ||
512 info->sample != sample ||
513 info->patch != patch) {
514 linker_error(prog,
515 "%s shader has multiple outputs at explicit "
516 "location %u with different aux storage\n",
517 _mesa_shader_stage_to_string(stage), location);
518 return false;
519 }
520 }
521 } else if (comp >= component && comp < last_comp) {
522 info->var = var;
523 info->numerical_type = get_numerical_type(type->without_array());
524 info->interpolation = interpolation;
525 info->centroid = centroid;
526 info->sample = sample;
527 info->patch = patch;
528 }
529
530 comp++;
531
532 /* We need to do some special handling for doubles as dvec3 and
533 * dvec4 consume two consecutive locations. We don't need to
534 * worry about components beginning at anything other than 0 as
535 * the spec does not allow this for dvec3 and dvec4.
536 */
537 if (comp == 4 && last_comp > 4) {
538 last_comp = last_comp - 4;
539 /* Bump location index and reset the component index */
540 location++;
541 comp = 0;
542 component = 0;
543 }
544 }
545
546 location++;
547 }
548
549 return true;
550 }
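/* A small GLSL sketch of location aliasing that this check accepts, assuming
* ARB_enhanced_layouts: two outputs share location 0 but use disjoint
* components with the same numerical type, interpolation and auxiliary
* storage.
*
*    layout(location = 0, component = 0) out vec2 a;   // components 0-1
*    layout(location = 0, component = 2) out vec2 b;   // components 2-3
*
* Declaring "b" with component = 1, or as an ivec2, would trigger the
* component-overlap or numerical-type errors above.
*/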
551
552 static bool
553 validate_explicit_variable_location(struct gl_context *ctx,
554 struct explicit_location_info explicit_locations[][4],
555 ir_variable *var,
556 gl_shader_program *prog,
557 gl_linked_shader *sh)
558 {
559 const glsl_type *type = get_varying_type(var, sh->Stage);
560 unsigned num_elements = type->count_attribute_slots(false);
561 unsigned idx = compute_variable_location_slot(var, sh->Stage);
562 unsigned slot_limit = idx + num_elements;
563
564 /* Vertex shader inputs and fragment shader outputs are validated in
565 * assign_attribute_or_color_locations() so we should not attempt to
566 * validate them again here.
567 */
568 unsigned slot_max;
569 if (var->data.mode == ir_var_shader_out) {
570 assert(sh->Stage != MESA_SHADER_FRAGMENT);
571 slot_max =
572 ctx->Const.Program[sh->Stage].MaxOutputComponents / 4;
573 } else {
574 assert(var->data.mode == ir_var_shader_in);
575 assert(sh->Stage != MESA_SHADER_VERTEX);
576 slot_max =
577 ctx->Const.Program[sh->Stage].MaxInputComponents / 4;
578 }
579
580 if (slot_limit > slot_max) {
581 linker_error(prog,
582 "Invalid location %u in %s shader\n",
583 idx, _mesa_shader_stage_to_string(sh->Stage));
584 return false;
585 }
586
587 const glsl_type *type_without_array = type->without_array();
588 if (type_without_array->is_interface()) {
589 for (unsigned i = 0; i < type_without_array->length; i++) {
590 glsl_struct_field *field = &type_without_array->fields.structure[i];
591 unsigned field_location = field->location -
592 (field->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
593 if (!check_location_aliasing(explicit_locations, var,
594 field_location,
595 0, field_location + 1,
596 field->type,
597 field->interpolation,
598 field->centroid,
599 field->sample,
600 field->patch,
601 prog, sh->Stage)) {
602 return false;
603 }
604 }
605 } else if (!check_location_aliasing(explicit_locations, var,
606 idx, var->data.location_frac,
607 slot_limit, type,
608 var->data.interpolation,
609 var->data.centroid,
610 var->data.sample,
611 var->data.patch,
612 prog, sh->Stage)) {
613 return false;
614 }
615
616 return true;
617 }
618
619 /**
620 * Validate explicit locations for the inputs to the first stage and the
621 * outputs of the last stage in an SSO program (everything in between is
622 * validated in cross_validate_outputs_to_inputs).
623 */
624 void
625 validate_sso_explicit_locations(struct gl_context *ctx,
626 struct gl_shader_program *prog,
627 gl_shader_stage first_stage,
628 gl_shader_stage last_stage)
629 {
630 assert(prog->SeparateShader);
631
632 /* VS inputs and FS outputs are validated in
633 * assign_attribute_or_color_locations()
634 */
635 bool validate_first_stage = first_stage != MESA_SHADER_VERTEX;
636 bool validate_last_stage = last_stage != MESA_SHADER_FRAGMENT;
637 if (!validate_first_stage && !validate_last_stage)
638 return;
639
640 struct explicit_location_info explicit_locations[MAX_VARYING][4];
641
642 gl_shader_stage stages[2] = { first_stage, last_stage };
643 bool validate_stage[2] = { validate_first_stage, validate_last_stage };
644 ir_variable_mode var_direction[2] = { ir_var_shader_in, ir_var_shader_out };
645
646 for (unsigned i = 0; i < 2; i++) {
647 if (!validate_stage[i])
648 continue;
649
650 gl_shader_stage stage = stages[i];
651
652 gl_linked_shader *sh = prog->_LinkedShaders[stage];
653 assert(sh);
654
655 memset(explicit_locations, 0, sizeof(explicit_locations));
656
657 foreach_in_list(ir_instruction, node, sh->ir) {
658 ir_variable *const var = node->as_variable();
659
660 if (var == NULL ||
661 !var->data.explicit_location ||
662 var->data.location < VARYING_SLOT_VAR0 ||
663 var->data.mode != var_direction[i])
664 continue;
665
666 if (!validate_explicit_variable_location(
667 ctx, explicit_locations, var, prog, sh)) {
668 return;
669 }
670 }
671 }
672 }
673
674 /**
675 * Validate that outputs from one stage match inputs of another
676 */
677 void
678 cross_validate_outputs_to_inputs(struct gl_context *ctx,
679 struct gl_shader_program *prog,
680 gl_linked_shader *producer,
681 gl_linked_shader *consumer)
682 {
683 glsl_symbol_table parameters;
684 struct explicit_location_info explicit_locations[MAX_VARYING][4] = { 0 };
685
686 /* Find all shader outputs in the "producer" stage.
687 */
688 foreach_in_list(ir_instruction, node, producer->ir) {
689 ir_variable *const var = node->as_variable();
690
691 if (var == NULL || var->data.mode != ir_var_shader_out)
692 continue;
693
694 if (!var->data.explicit_location
695 || var->data.location < VARYING_SLOT_VAR0)
696 parameters.add_variable(var);
697 else {
698 /* User-defined varyings with explicit locations are handled
699 * differently because they do not need to have matching names.
700 */
701 if (!validate_explicit_variable_location(ctx,
702 explicit_locations,
703 var, prog, producer)) {
704 return;
705 }
706 }
707 }
708
709
710 /* Find all shader inputs in the "consumer" stage. Any variables that have
711 * matching outputs already in the symbol table must have the same type and
712 * qualifiers.
713 *
714 * Exception: if the consumer is the geometry shader, then the inputs
715 * should be arrays and the type of the array element should match the type
716 * of the corresponding producer output.
717 */
718 foreach_in_list(ir_instruction, node, consumer->ir) {
719 ir_variable *const input = node->as_variable();
720
721 if (input == NULL || input->data.mode != ir_var_shader_in)
722 continue;
723
724 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
725 const ir_variable *const front_color =
726 parameters.get_variable("gl_FrontColor");
727
728 const ir_variable *const back_color =
729 parameters.get_variable("gl_BackColor");
730
731 cross_validate_front_and_back_color(ctx, prog, input,
732 front_color, back_color,
733 consumer->Stage, producer->Stage);
734 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
735 const ir_variable *const front_color =
736 parameters.get_variable("gl_FrontSecondaryColor");
737
738 const ir_variable *const back_color =
739 parameters.get_variable("gl_BackSecondaryColor");
740
741 cross_validate_front_and_back_color(ctx, prog, input,
742 front_color, back_color,
743 consumer->Stage, producer->Stage);
744 } else {
745 /* The rules for connecting inputs and outputs change in the presence
746 * of explicit locations. In this case, we no longer care about the
747 * names of the variables. Instead, we care only about the
748 * explicitly assigned location.
749 */
750 ir_variable *output = NULL;
751 if (input->data.explicit_location
752 && input->data.location >= VARYING_SLOT_VAR0) {
753
754 const glsl_type *type = get_varying_type(input, consumer->Stage);
755 unsigned num_elements = type->count_attribute_slots(false);
756 unsigned idx =
757 compute_variable_location_slot(input, consumer->Stage);
758 unsigned slot_limit = idx + num_elements;
759
760 while (idx < slot_limit) {
761 if (idx >= MAX_VARYING) {
762 linker_error(prog,
763 "Invalid location %u in %s shader\n", idx,
764 _mesa_shader_stage_to_string(consumer->Stage));
765 return;
766 }
767
768 output = explicit_locations[idx][input->data.location_frac].var;
769
770 if (output == NULL ||
771 input->data.location != output->data.location) {
772 linker_error(prog,
773 "%s shader input `%s' with explicit location "
774 "has no matching output\n",
775 _mesa_shader_stage_to_string(consumer->Stage),
776 input->name);
777 break;
778 }
779 idx++;
780 }
781 } else {
782 output = parameters.get_variable(input->name);
783 }
784
785 if (output != NULL) {
786 /* Interface blocks have their own validation elsewhere so don't
787 * try validating them here.
788 */
789 if (!(input->get_interface_type() &&
790 output->get_interface_type()))
791 cross_validate_types_and_qualifiers(ctx, prog, input, output,
792 consumer->Stage,
793 producer->Stage);
794 } else {
795 /* Check for input vars with unmatched output vars in the previous
796 * stage, taking into account that interface blocks could have a
797 * matching output but with a different name, so we ignore them.
798 */
799 assert(!input->data.assigned);
800 if (input->data.used && !input->get_interface_type() &&
801 !input->data.explicit_location && !prog->SeparateShader)
802 linker_error(prog,
803 "%s shader input `%s' "
804 "has no matching output in the previous stage\n",
805 _mesa_shader_stage_to_string(consumer->Stage),
806 input->name);
807 }
808 }
809 }
810 }
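/* In short, two matching rules are applied above, sketched here in GLSL:
*
*    // matched by name:
*    out vec4 color;                      in vec4 color;
*
*    // matched by explicit location, names may differ:
*    layout(location = 3) out vec4 a;     layout(location = 3) in vec4 b;
*
* An input with an explicit location that finds no output at that location,
* or a used name-matched input with no identically named output, produces the
* linker errors emitted above (the unmatched-by-name case is skipped for
* separate shader objects).
*/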
811
812 /**
813 * Demote shader inputs and outputs that are not used in other stages, and
814 * remove them via dead code elimination.
815 */
816 static void
817 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
818 gl_linked_shader *sh,
819 enum ir_variable_mode mode)
820 {
821 if (is_separate_shader_object)
822 return;
823
824 foreach_in_list(ir_instruction, node, sh->ir) {
825 ir_variable *const var = node->as_variable();
826
827 if (var == NULL || var->data.mode != int(mode))
828 continue;
829
830 /* A shader 'in' or 'out' variable is only really an input or output if
831 * its value is used by other shader stages. This will cause the
832 * variable to have a location assigned.
833 */
834 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
835 assert(var->data.mode != ir_var_temporary);
836
837 /* Assign zeros to demoted inputs to allow more optimizations. */
838 if (var->data.mode == ir_var_shader_in && !var->constant_value)
839 var->constant_value = ir_constant::zero(var, var->type);
840
841 var->data.mode = ir_var_auto;
842 }
843 }
844
845 /* Eliminate code that is now dead due to unused inputs/outputs being
846 * demoted.
847 */
848 while (do_dead_code(sh->ir, false))
849 ;
850
851 }
852
853 /**
854 * Initialize this object based on a string that was passed to
855 * glTransformFeedbackVaryings.
856 *
857 * If the input is mal-formed, this call still succeeds, but it sets
858 * this->var_name to a mal-formed input, so tfeedback_decl::find_output_var()
859 * will fail to find any matching variable.
860 */
861 void
862 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
863 const char *input)
864 {
865 /* We don't have to be pedantic about what is a valid GLSL variable name,
866 * because any variable with an invalid name can't exist in the IR anyway.
867 */
868
869 this->location = -1;
870 this->orig_name = input;
871 this->lowered_builtin_array_variable = none;
872 this->skip_components = 0;
873 this->next_buffer_separator = false;
874 this->matched_candidate = NULL;
875 this->stream_id = 0;
876 this->buffer = 0;
877 this->offset = 0;
878
879 if (ctx->Extensions.ARB_transform_feedback3) {
880 /* Parse gl_NextBuffer. */
881 if (strcmp(input, "gl_NextBuffer") == 0) {
882 this->next_buffer_separator = true;
883 return;
884 }
885
886 /* Parse gl_SkipComponents. */
887 if (strcmp(input, "gl_SkipComponents1") == 0)
888 this->skip_components = 1;
889 else if (strcmp(input, "gl_SkipComponents2") == 0)
890 this->skip_components = 2;
891 else if (strcmp(input, "gl_SkipComponents3") == 0)
892 this->skip_components = 3;
893 else if (strcmp(input, "gl_SkipComponents4") == 0)
894 this->skip_components = 4;
895
896 if (this->skip_components)
897 return;
898 }
899
900 /* Parse a declaration. */
901 const char *base_name_end;
902 long subscript = parse_program_resource_name(input, &base_name_end);
903 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
904 if (this->var_name == NULL) {
905 _mesa_error_no_memory(__func__);
906 return;
907 }
908
909 if (subscript >= 0) {
910 this->array_subscript = subscript;
911 this->is_subscripted = true;
912 } else {
913 this->is_subscripted = false;
914 }
915
916 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
917 * class must behave specially to account for the fact that gl_ClipDistance
918 * is converted from a float[8] to a vec4[2].
919 */
920 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
921 strcmp(this->var_name, "gl_ClipDistance") == 0) {
922 this->lowered_builtin_array_variable = clip_distance;
923 }
924 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
925 strcmp(this->var_name, "gl_CullDistance") == 0) {
926 this->lowered_builtin_array_variable = cull_distance;
927 }
928
929 if (ctx->Const.LowerTessLevel &&
930 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
931 this->lowered_builtin_array_variable = tess_level_outer;
932 if (ctx->Const.LowerTessLevel &&
933 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
934 this->lowered_builtin_array_variable = tess_level_inner;
935 }
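/* The strings parsed here come straight from the application, e.g. (a usage
* sketch; the non-builtin names are placeholders):
*
*    const char *varyings[] = {
*       "gl_Position",          // plain varying
*       "foo[1]",               // subscripted array element
*       "gl_SkipComponents2",   // skip 2 components (ARB_transform_feedback3)
*       "gl_NextBuffer",        // advance to the next buffer (ditto)
*    };
*    glTransformFeedbackVaryings(prog, 4, varyings, GL_INTERLEAVED_ATTRIBS);
*/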
936
937
938 /**
939 * Determine whether two tfeedback_decl objects refer to the same variable and
940 * array index (if applicable).
941 */
942 bool
943 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
944 {
945 assert(x.is_varying() && y.is_varying());
946
947 if (strcmp(x.var_name, y.var_name) != 0)
948 return false;
949 if (x.is_subscripted != y.is_subscripted)
950 return false;
951 if (x.is_subscripted && x.array_subscript != y.array_subscript)
952 return false;
953 return true;
954 }
955
956
957 /**
958 * Assign a location and stream ID for this tfeedback_decl object based on the
959 * transform feedback candidate found by find_candidate.
960 *
961 * If an error occurs, the error is reported through linker_error() and false
962 * is returned.
963 */
964 bool
965 tfeedback_decl::assign_location(struct gl_context *ctx,
966 struct gl_shader_program *prog)
967 {
968 assert(this->is_varying());
969
970 unsigned fine_location
971 = this->matched_candidate->toplevel_var->data.location * 4
972 + this->matched_candidate->toplevel_var->data.location_frac
973 + this->matched_candidate->offset;
974 const unsigned dmul =
975 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
976
977 if (this->matched_candidate->type->is_array()) {
978 /* Array variable */
979 const unsigned matrix_cols =
980 this->matched_candidate->type->fields.array->matrix_columns;
981 const unsigned vector_elements =
982 this->matched_candidate->type->fields.array->vector_elements;
983 unsigned actual_array_size;
984 switch (this->lowered_builtin_array_variable) {
985 case clip_distance:
986 actual_array_size = prog->last_vert_prog ?
987 prog->last_vert_prog->info.clip_distance_array_size : 0;
988 break;
989 case cull_distance:
990 actual_array_size = prog->last_vert_prog ?
991 prog->last_vert_prog->info.cull_distance_array_size : 0;
992 break;
993 case tess_level_outer:
994 actual_array_size = 4;
995 break;
996 case tess_level_inner:
997 actual_array_size = 2;
998 break;
999 case none:
1000 default:
1001 actual_array_size = this->matched_candidate->type->array_size();
1002 break;
1003 }
1004
1005 if (this->is_subscripted) {
1006 /* Check array bounds. */
1007 if (this->array_subscript >= actual_array_size) {
1008 linker_error(prog, "Transform feedback varying %s has index "
1009 "%i, but the array size is %u.",
1010 this->orig_name, this->array_subscript,
1011 actual_array_size);
1012 return false;
1013 }
1014 unsigned array_elem_size = this->lowered_builtin_array_variable ?
1015 1 : vector_elements * matrix_cols * dmul;
1016 fine_location += array_elem_size * this->array_subscript;
1017 this->size = 1;
1018 } else {
1019 this->size = actual_array_size;
1020 }
1021 this->vector_elements = vector_elements;
1022 this->matrix_columns = matrix_cols;
1023 if (this->lowered_builtin_array_variable)
1024 this->type = GL_FLOAT;
1025 else
1026 this->type = this->matched_candidate->type->fields.array->gl_type;
1027 } else {
1028 /* Regular variable (scalar, vector, or matrix) */
1029 if (this->is_subscripted) {
1030 linker_error(prog, "Transform feedback varying %s requested, "
1031 "but %s is not an array.",
1032 this->orig_name, this->var_name);
1033 return false;
1034 }
1035 this->size = 1;
1036 this->vector_elements = this->matched_candidate->type->vector_elements;
1037 this->matrix_columns = this->matched_candidate->type->matrix_columns;
1038 this->type = this->matched_candidate->type->gl_type;
1039 }
1040 this->location = fine_location / 4;
1041 this->location_frac = fine_location % 4;
1042
1043 /* From GL_EXT_transform_feedback:
1044 * A program will fail to link if:
1045 *
1046 * * the total number of components to capture in any varying
1047 * variable in <varyings> is greater than the constant
1048 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
1049 * buffer mode is SEPARATE_ATTRIBS_EXT;
1050 */
1051 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1052 this->num_components() >
1053 ctx->Const.MaxTransformFeedbackSeparateComponents) {
1054 linker_error(prog, "Transform feedback varying %s exceeds "
1055 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
1056 this->orig_name);
1057 return false;
1058 }
1059
1060 /* Only transform feedback varyings can be assigned to non-zero streams,
1061 * so assign the stream id here.
1062 */
1063 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
1064
1065 unsigned array_offset = this->array_subscript * 4 * dmul;
1066 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
1067 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
1068 this->offset = this->matched_candidate->toplevel_var->data.offset +
1069 array_offset + struct_offset;
1070
1071 return true;
1072 }
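/* A worked example of the lowered-builtin path: if the driver lowers
* gl_ClipDistance (float[8]) to gl_ClipDistanceMESA (vec4[2]) and the
* application captures "gl_ClipDistance[5]", array_elem_size is 1, so
* fine_location advances by 5 per-float slots from the start of
* gl_ClipDistanceMESA; the value lands at location_frac 1 of the second vec4,
* with this->type reported as GL_FLOAT and this->size set to 1.
*/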
1073
1074
1075 unsigned
1076 tfeedback_decl::get_num_outputs() const
1077 {
1078 if (!this->is_varying()) {
1079 return 0;
1080 }
1081 return (this->num_components() + this->location_frac + 3)/4;
1082 }
1083
1084
1085 /**
1086 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
1087 *
1088 * If an error occurs, the error is reported through linker_error() and false
1089 * is returned.
1090 */
1091 bool
1092 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
1093 struct gl_transform_feedback_info *info,
1094 unsigned buffer, unsigned buffer_index,
1095 const unsigned max_outputs, bool *explicit_stride,
1096 bool has_xfb_qualifiers) const
1097 {
1098 unsigned xfb_offset = 0;
1099 unsigned size = this->size;
1100 /* Handle gl_SkipComponents. */
1101 if (this->skip_components) {
1102 info->Buffers[buffer].Stride += this->skip_components;
1103 size = this->skip_components;
1104 goto store_varying;
1105 }
1106
1107 if (this->next_buffer_separator) {
1108 size = 0;
1109 goto store_varying;
1110 }
1111
1112 if (has_xfb_qualifiers) {
1113 xfb_offset = this->offset / 4;
1114 } else {
1115 xfb_offset = info->Buffers[buffer].Stride;
1116 }
1117 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
1118
1119 {
1120 unsigned location = this->location;
1121 unsigned location_frac = this->location_frac;
1122 unsigned num_components = this->num_components();
1123 while (num_components > 0) {
1124 unsigned output_size = MIN2(num_components, 4 - location_frac);
1125 assert((info->NumOutputs == 0 && max_outputs == 0) ||
1126 info->NumOutputs < max_outputs);
1127
1128 /* From the ARB_enhanced_layouts spec:
1129 *
1130 * "If such a block member or variable is not written during a shader
1131 * invocation, the buffer contents at the assigned offset will be
1132 * undefined. Even if there are no static writes to a variable or
1133 * member that is assigned a transform feedback offset, the space is
1134 * still allocated in the buffer and still affects the stride."
1135 */
1136 if (this->is_varying_written()) {
1137 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
1138 info->Outputs[info->NumOutputs].OutputRegister = location;
1139 info->Outputs[info->NumOutputs].NumComponents = output_size;
1140 info->Outputs[info->NumOutputs].StreamId = stream_id;
1141 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
1142 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
1143 ++info->NumOutputs;
1144 }
1145 info->Buffers[buffer].Stream = this->stream_id;
1146 xfb_offset += output_size;
1147
1148 num_components -= output_size;
1149 location++;
1150 location_frac = 0;
1151 }
1152 }
1153
1154 if (explicit_stride && explicit_stride[buffer]) {
1155 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
1156 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
1157 "multiple of 8 as its applied to a type that is or "
1158 "contains a double.",
1159 info->Buffers[buffer].Stride * 4);
1160 return false;
1161 }
1162
1163 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
1164 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
1165 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
1166 "buffer (%d)", xfb_offset * 4,
1167 info->Buffers[buffer].Stride * 4, buffer);
1168 return false;
1169 }
1170 } else {
1171 info->Buffers[buffer].Stride = xfb_offset;
1172 }
1173
1174 /* From GL_EXT_transform_feedback:
1175 * A program will fail to link if:
1176 *
1177 * * the total number of components to capture is greater than
1178 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
1179 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
1180 *
1181 * From GL_ARB_enhanced_layouts:
1182 *
1183 * "The resulting stride (implicit or explicit) must be less than or
1184 * equal to the implementation-dependent constant
1185 * gl_MaxTransformFeedbackInterleavedComponents."
1186 */
1187 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
1188 has_xfb_qualifiers) &&
1189 info->Buffers[buffer].Stride >
1190 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
1191 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
1192 "limit has been exceeded.");
1193 return false;
1194 }
1195
1196 store_varying:
1197 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
1198 this->orig_name);
1199 info->Varyings[info->NumVarying].Type = this->type;
1200 info->Varyings[info->NumVarying].Size = size;
1201 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
1202 info->NumVarying++;
1203 info->Buffers[buffer].NumVaryings++;
1204
1205 return true;
1206 }
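/* A worked example of the implicit-stride path (no xfb qualifiers,
* GL_INTERLEAVED_ATTRIBS): capturing a vec3 followed by a float into one
* buffer places the vec3 at DstOffset 0 and the float at DstOffset 3, and
* leaves Buffers[buffer].Stride at 4 components, i.e. 16 bytes per vertex.
*/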
1207
1208
1209 const tfeedback_candidate *
1210 tfeedback_decl::find_candidate(gl_shader_program *prog,
1211 hash_table *tfeedback_candidates)
1212 {
1213 const char *name = this->var_name;
1214 switch (this->lowered_builtin_array_variable) {
1215 case none:
1216 name = this->var_name;
1217 break;
1218 case clip_distance:
1219 name = "gl_ClipDistanceMESA";
1220 break;
1221 case cull_distance:
1222 name = "gl_CullDistanceMESA";
1223 break;
1224 case tess_level_outer:
1225 name = "gl_TessLevelOuterMESA";
1226 break;
1227 case tess_level_inner:
1228 name = "gl_TessLevelInnerMESA";
1229 break;
1230 }
1231 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
1232
1233 this->matched_candidate = entry ?
1234 (const tfeedback_candidate *) entry->data : NULL;
1235
1236 if (!this->matched_candidate) {
1237 /* From GL_EXT_transform_feedback:
1238 * A program will fail to link if:
1239 *
1240 * * any variable name specified in the <varyings> array is not
1241 * declared as an output in the geometry shader (if present) or
1242 * the vertex shader (if no geometry shader is present);
1243 */
1244 linker_error(prog, "Transform feedback varying %s undeclared.",
1245 this->orig_name);
1246 }
1247
1248 return this->matched_candidate;
1249 }
1250
1251
1252 /**
1253 * Parse all the transform feedback declarations that were passed to
1254 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1255 *
1256 * If an error occurs, the error is reported through linker_error() and false
1257 * is returned.
1258 */
1259 static bool
1260 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1261 const void *mem_ctx, unsigned num_names,
1262 char **varying_names, tfeedback_decl *decls)
1263 {
1264 for (unsigned i = 0; i < num_names; ++i) {
1265 decls[i].init(ctx, mem_ctx, varying_names[i]);
1266
1267 if (!decls[i].is_varying())
1268 continue;
1269
1270 /* From GL_EXT_transform_feedback:
1271 * A program will fail to link if:
1272 *
1273 * * any two entries in the <varyings> array specify the same varying
1274 * variable;
1275 *
1276 * We interpret this to mean "any two entries in the <varyings> array
1277 * specify the same varying variable and array index", since transform
1278 * feedback of arrays would be useless otherwise.
1279 */
1280 for (unsigned j = 0; j < i; ++j) {
1281 if (decls[j].is_varying()) {
1282 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1283 linker_error(prog, "Transform feedback varying %s specified "
1284 "more than once.", varying_names[i]);
1285 return false;
1286 }
1287 }
1288 }
1289 }
1290 return true;
1291 }
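/* For example, passing { "foo", "foo" } or { "foo[1]", "foo[1]" } to
* glTransformFeedbackVaryings() fails with the error above, while
* { "foo[0]", "foo[1]" } is accepted because the array indices differ.
*/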
1292
1293
1294 static int
1295 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1296 {
1297 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1298 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1299
1300 if (x->get_buffer() != y->get_buffer())
1301 return x->get_buffer() - y->get_buffer();
1302 return x->get_offset() - y->get_offset();
1303 }
1304
1305 /**
1306 * Store transform feedback location assignments into
1307 * prog->sh.LinkedTransformFeedback based on the data stored in
1308 * tfeedback_decls.
1309 *
1310 * If an error occurs, the error is reported through linker_error() and false
1311 * is returned.
1312 */
1313 static bool
1314 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1315 unsigned num_tfeedback_decls,
1316 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1317 {
1318 if (!prog->last_vert_prog)
1319 return true;
1320
1321 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1322 * tracking the number of buffers doesn't overflow.
1323 */
1324 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1325
1326 bool separate_attribs_mode =
1327 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1328
1329 struct gl_program *xfb_prog = prog->last_vert_prog;
1330 xfb_prog->sh.LinkedTransformFeedback =
1331 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1332
1333 /* The xfb_offset qualifier does not have to be used in increasing order,
1334 * however some drivers expect to receive the list of transform feedback
1335 * declarations in order, so sort it now for convenience.
1336 */
1337 if (has_xfb_qualifiers) {
1338 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1339 cmp_xfb_offset);
1340 }
1341
1342 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1343 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1344 num_tfeedback_decls);
1345
1346 unsigned num_outputs = 0;
1347 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1348 if (tfeedback_decls[i].is_varying_written())
1349 num_outputs += tfeedback_decls[i].get_num_outputs();
1350 }
1351
1352 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1353 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1354 num_outputs);
1355
1356 unsigned num_buffers = 0;
1357 unsigned buffers = 0;
1358
1359 if (!has_xfb_qualifiers && separate_attribs_mode) {
1360 /* GL_SEPARATE_ATTRIBS */
1361 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1362 if (!tfeedback_decls[i].store(ctx, prog,
1363 xfb_prog->sh.LinkedTransformFeedback,
1364 num_buffers, num_buffers, num_outputs,
1365 NULL, has_xfb_qualifiers))
1366 return false;
1367
1368 buffers |= 1 << num_buffers;
1369 num_buffers++;
1370 }
1371 }
1372 else {
1373 /* GL_INTERLEAVED_ATTRIBS */
1374 int buffer_stream_id = -1;
1375 unsigned buffer =
1376 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1377 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1378
1379 /* Apply any xfb_stride global qualifiers */
1380 if (has_xfb_qualifiers) {
1381 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1382 if (prog->TransformFeedback.BufferStride[j]) {
1383 explicit_stride[j] = true;
1384 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1385 prog->TransformFeedback.BufferStride[j] / 4;
1386 }
1387 }
1388 }
1389
1390 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1391 if (has_xfb_qualifiers &&
1392 buffer != tfeedback_decls[i].get_buffer()) {
1393 /* we have moved to the next buffer so reset stream id */
1394 buffer_stream_id = -1;
1395 num_buffers++;
1396 }
1397
1398 if (tfeedback_decls[i].is_next_buffer_separator()) {
1399 if (!tfeedback_decls[i].store(ctx, prog,
1400 xfb_prog->sh.LinkedTransformFeedback,
1401 buffer, num_buffers, num_outputs,
1402 explicit_stride, has_xfb_qualifiers))
1403 return false;
1404 num_buffers++;
1405 buffer_stream_id = -1;
1406 continue;
1407 }
1408
1409 if (has_xfb_qualifiers) {
1410 buffer = tfeedback_decls[i].get_buffer();
1411 } else {
1412 buffer = num_buffers;
1413 }
1414
1415 if (tfeedback_decls[i].is_varying()) {
1416 if (buffer_stream_id == -1) {
1417 /* First varying writing to this buffer: remember its stream */
1418 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1419
1420 /* Only mark a buffer as active when there is a varying
1421 * attached to it. This behaviour is based on a revised version
1422 * of section 13.2.2 of the GL 4.6 spec.
1423 */
1424 buffers |= 1 << buffer;
1425 } else if (buffer_stream_id !=
1426 (int) tfeedback_decls[i].get_stream_id()) {
1427 /* Varying writes to the same buffer from a different stream */
1428 linker_error(prog,
1429 "Transform feedback can't capture varyings belonging "
1430 "to different vertex streams in a single buffer. "
1431 "Varying %s writes to buffer from stream %u, other "
1432 "varyings in the same buffer write from stream %u.",
1433 tfeedback_decls[i].name(),
1434 tfeedback_decls[i].get_stream_id(),
1435 buffer_stream_id);
1436 return false;
1437 }
1438 }
1439
1440 if (!tfeedback_decls[i].store(ctx, prog,
1441 xfb_prog->sh.LinkedTransformFeedback,
1442 buffer, num_buffers, num_outputs,
1443 explicit_stride, has_xfb_qualifiers))
1444 return false;
1445 }
1446 }
1447
1448 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1449
1450 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1451 return true;
1452 }
1453
1454 namespace {
1455
1456 /**
1457 * Data structure recording the relationship between outputs of one shader
1458 * stage (the "producer") and inputs of another (the "consumer").
1459 */
1460 class varying_matches
1461 {
1462 public:
1463 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1464 bool enhanced_layouts_enabled,
1465 gl_shader_stage producer_stage,
1466 gl_shader_stage consumer_stage);
1467 ~varying_matches();
1468 void record(ir_variable *producer_var, ir_variable *consumer_var);
1469 unsigned assign_locations(struct gl_shader_program *prog,
1470 uint8_t components[],
1471 uint64_t reserved_slots);
1472 void store_locations() const;
1473
1474 private:
1475 bool is_varying_packing_safe(const glsl_type *type,
1476 const ir_variable *var) const;
1477
1478 /**
1479 * If true, this driver disables varying packing, so all varyings need to
1480 * be aligned on slot boundaries, and take up a number of slots equal to
1481 * their number of matrix columns times their array size.
1482 *
1483 * Packing may also be disabled because our current packing method is not
1484 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1485 * guaranteed to match across stages.
1486 */
1487 const bool disable_varying_packing;
1488
1489 /**
1490 * If true, this driver has transform feedback enabled. The transform
1491 * feedback code requires at least some packing to be done even when varying
1492 * packing is disabled; fortunately, where transform feedback requires
1493 * packing it's safe to override the disabled setting. See
1494 * is_varying_packing_safe().
1495 */
1496 const bool xfb_enabled;
1497
1498 const bool enhanced_layouts_enabled;
1499
1500 /**
1501 * Enum representing the order in which varyings are packed within a
1502 * packing class.
1503 *
1504 * Currently we pack vec4's first, then vec2's, then scalar values, then
1505 * vec3's. This order ensures that the only vectors that are at risk of
1506 * having to be "double parked" (split between two adjacent varying slots)
1507 * are the vec3's.
1508 */
1509 enum packing_order_enum {
1510 PACKING_ORDER_VEC4,
1511 PACKING_ORDER_VEC2,
1512 PACKING_ORDER_SCALAR,
1513 PACKING_ORDER_VEC3,
1514 };
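/* For instance, packing one vec4, one vec2, one float and one vec3 of the
* same packing class in this order fills slot 0 with the vec4 and components
* 0-2 of slot 1 with the vec2 and the float; the vec3 then starts at
* component 3 of slot 1 and spills into slot 2, which is the "double parked"
* case described above (assuming packing is enabled).
*/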
1515
1516 static unsigned compute_packing_class(const ir_variable *var);
1517 static packing_order_enum compute_packing_order(const ir_variable *var);
1518 static int match_comparator(const void *x_generic, const void *y_generic);
1519 static int xfb_comparator(const void *x_generic, const void *y_generic);
1520
1521 /**
1522 * Structure recording the relationship between a single producer output
1523 * and a single consumer input.
1524 */
1525 struct match {
1526 /**
1527 * Packing class for this varying, computed by compute_packing_class().
1528 */
1529 unsigned packing_class;
1530
1531 /**
1532 * Packing order for this varying, computed by compute_packing_order().
1533 */
1534 packing_order_enum packing_order;
1535 unsigned num_components;
1536
1537 /**
1538 * The output variable in the producer stage.
1539 */
1540 ir_variable *producer_var;
1541
1542 /**
1543 * The input variable in the consumer stage.
1544 */
1545 ir_variable *consumer_var;
1546
1547 /**
1548 * The location which has been assigned for this varying. This is
1549 * expressed in multiples of a float, with the first generic varying
1550 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1551 * value 0.
1552 */
1553 unsigned generic_location;
1554 } *matches;
1555
1556 /**
1557 * The number of elements in the \c matches array that are currently in
1558 * use.
1559 */
1560 unsigned num_matches;
1561
1562 /**
1563 * The number of elements that were set aside for the \c matches array when
1564 * it was allocated.
1565 */
1566 unsigned matches_capacity;
1567
1568 gl_shader_stage producer_stage;
1569 gl_shader_stage consumer_stage;
1570 };
1571
1572 } /* anonymous namespace */
1573
1574 varying_matches::varying_matches(bool disable_varying_packing,
1575 bool xfb_enabled,
1576 bool enhanced_layouts_enabled,
1577 gl_shader_stage producer_stage,
1578 gl_shader_stage consumer_stage)
1579 : disable_varying_packing(disable_varying_packing),
1580 xfb_enabled(xfb_enabled),
1581 enhanced_layouts_enabled(enhanced_layouts_enabled),
1582 producer_stage(producer_stage),
1583 consumer_stage(consumer_stage)
1584 {
1585 /* Note: this initial capacity is rather arbitrarily chosen to be large
1586 * enough for many cases without wasting an unreasonable amount of space.
1587 * varying_matches::record() will resize the array if there are more than
1588 * this number of varyings.
1589 */
1590 this->matches_capacity = 8;
1591 this->matches = (match *)
1592 malloc(sizeof(*this->matches) * this->matches_capacity);
1593 this->num_matches = 0;
1594 }
1595
1596
1597 varying_matches::~varying_matches()
1598 {
1599 free(this->matches);
1600 }
1601
1602
1603 /**
1604 * Packing is always safe on individual arrays, structures, and matrices. It
1605 * is also safe if the varying is only used for transform feedback.
1606 */
1607 bool
1608 varying_matches::is_varying_packing_safe(const glsl_type *type,
1609 const ir_variable *var) const
1610 {
1611 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1612 consumer_stage == MESA_SHADER_TESS_CTRL ||
1613 producer_stage == MESA_SHADER_TESS_CTRL)
1614 return false;
1615
1616 return xfb_enabled && (type->is_array() || type->is_record() ||
1617 type->is_matrix() || var->data.is_xfb_only);
1618 }
1619
1620
1621 /**
1622 * Record the given producer/consumer variable pair in the list of variables
1623 * that should later be assigned locations.
1624 *
1625 * It is permissible for \c consumer_var to be NULL (this happens if a
1626 * variable is output by the producer and consumed by transform feedback, but
1627 * not consumed by the consumer).
1628 *
1629 * If \c producer_var has already been paired up with a consumer_var, or
1630 * producer_var is part of fixed pipeline functionality (and hence already has
1631 * a location assigned), this function has no effect.
1632 *
1633 * Note: as a side effect this function may change the interpolation type of
1634 * \c producer_var, but only when the change couldn't possibly affect
1635 * rendering.
1636 */
1637 void
1638 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1639 {
1640 assert(producer_var != NULL || consumer_var != NULL);
1641
1642 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1643 producer_var->data.explicit_location)) ||
1644 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1645 consumer_var->data.explicit_location))) {
1646 /* Either a location already exists for this variable (since it is part
1647 * of fixed functionality), or it has already been recorded as part of a
1648 * previous match.
1649 */
1650 return;
1651 }
1652
1653 bool needs_flat_qualifier = consumer_var == NULL &&
1654 (producer_var->type->contains_integer() ||
1655 producer_var->type->contains_double());
1656
1657 if (!disable_varying_packing &&
1658 (needs_flat_qualifier ||
1659 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1660 /* Since this varying is not being consumed by the fragment shader, its
1661 * interpolation type cannot possibly affect rendering.
1662 * Alternatively, this variable has no consumer at all and is (or
1663 * contains) an integer or a double, so it needs to be flat.
1664 * If the consumer stage is unknown, don't modify the interpolation
1665 * type as it could affect rendering later with separate shaders.
1666 *
1667 * lower_packed_varyings requires all integer varyings to be flat,
1668 * regardless of where they appear. We can trivially satisfy that
1669 * requirement by changing the interpolation type to flat here.
1670 */
1671 if (producer_var) {
1672 producer_var->data.centroid = false;
1673 producer_var->data.sample = false;
1674 producer_var->data.interpolation = INTERP_MODE_FLAT;
1675 }
1676
1677 if (consumer_var) {
1678 consumer_var->data.centroid = false;
1679 consumer_var->data.sample = false;
1680 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1681 }
1682 }
1683
1684 if (this->num_matches == this->matches_capacity) {
1685 this->matches_capacity *= 2;
1686 this->matches = (match *)
1687 realloc(this->matches,
1688 sizeof(*this->matches) * this->matches_capacity);
1689 }
1690
1691 /* We must use the consumer to compute the packing class because in GL4.4+
1692 * there is no guarantee interpolation qualifiers will match across stages.
1693 *
1694 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1695 *
1696 * "The type and presence of interpolation qualifiers of variables with
1697 * the same name declared in all linked shaders for the same cross-stage
1698 * interface must match, otherwise the link command will fail.
1699 *
1700 * When comparing an output from one stage to an input of a subsequent
1701 * stage, the input and output don't match if their interpolation
1702 * qualifiers (or lack thereof) are not the same."
1703 *
1704 * This text was also in at least revision 7 of the 4.40 spec but is no
1705 * longer in revision 9 and not in the 4.50 spec.
1706 */
1707 const ir_variable *const var = (consumer_var != NULL)
1708 ? consumer_var : producer_var;
1709 const gl_shader_stage stage = (consumer_var != NULL)
1710 ? consumer_stage : producer_stage;
1711 const glsl_type *type = get_varying_type(var, stage);
1712
1713 if (producer_var && consumer_var &&
1714 consumer_var->data.must_be_shader_input) {
1715 producer_var->data.must_be_shader_input = 1;
1716 }
1717
1718 this->matches[this->num_matches].packing_class
1719 = this->compute_packing_class(var);
1720 this->matches[this->num_matches].packing_order
1721 = this->compute_packing_order(var);
1722 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1723 var->data.must_be_shader_input) {
1724 unsigned slots = type->count_attribute_slots(false);
1725 this->matches[this->num_matches].num_components = slots * 4;
1726 } else {
1727 this->matches[this->num_matches].num_components
1728 = type->component_slots();
1729 }
1730
1731 this->matches[this->num_matches].producer_var = producer_var;
1732 this->matches[this->num_matches].consumer_var = consumer_var;
1733 this->num_matches++;
1734 if (producer_var)
1735 producer_var->data.is_unmatched_generic_inout = 0;
1736 if (consumer_var)
1737 consumer_var->data.is_unmatched_generic_inout = 0;
1738 }
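/* A rough sketch of how num_components is derived above for a hypothetical
 * "vec3" varying (illustrative numbers only):
 *
 *   packed (packing enabled, not must_be_shader_input):
 *      num_components = type->component_slots()                 ->  3
 *   unpacked (packing disabled and not packing safe, or
 *             must_be_shader_input):
 *      num_components = type->count_attribute_slots(false) * 4  ->  4
 *
 * i.e. an unpacked varying is always padded out to whole vec4 slots.
 */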
1739
1740
1741 /**
1742 * Choose locations for all of the variable matches that were previously
1743 * passed to varying_matches::record().
1744 * \param components returns array[slot] of number of components used
1745 * per slot (1, 2, 3 or 4)
1746 * \param reserved_slots bitmask indicating which varying slots are already
1747 * allocated
1748 * \return number of slots (4-element vectors) allocated
1749 */
1750 unsigned
1751 varying_matches::assign_locations(struct gl_shader_program *prog,
1752 uint8_t components[],
1753 uint64_t reserved_slots)
1754 {
1755 /* If packing has been disabled then we cannot safely sort the varyings by
1756 * class as it may mean we are using a version of OpenGL where
1757 * interpolation qualifiers are not guaranteed to be matching across
1758 * shaders; sorting in this case could result in mismatched shader
1759 * interfaces.
1760 * When packing is disabled the sort orders varyings used by transform
1761 * feedback first, but also depends on *undefined behaviour* of qsort to
1762 * reverse the order of the varyings. See: xfb_comparator().
1763 */
1764 if (!this->disable_varying_packing) {
1765 /* Sort varying matches into an order that makes them easy to pack. */
1766 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1767 &varying_matches::match_comparator);
1768 } else {
1769 /* Only sort varyings that are only used by transform feedback. */
1770 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1771 &varying_matches::xfb_comparator);
1772 }
1773
1774 unsigned generic_location = 0;
1775 unsigned generic_patch_location = MAX_VARYING*4;
1776 bool previous_var_xfb_only = false;
1777 unsigned previous_packing_class = ~0u;
1778
1779 /* For transform feedback separate mode, we know the number of attributes
1780 * is <= the number of buffers. So packing isn't critical. In fact,
1781 * packing vec3 attributes can cause trouble because splitting a vec3
1782 * effectively creates an additional transform feedback output. The
1783 * extra TFB output may exceed device driver limits.
1784 */
1785 const bool dont_pack_vec3 =
1786 (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
1787 prog->TransformFeedback.NumVarying > 0);
1788
1789 for (unsigned i = 0; i < this->num_matches; i++) {
1790 unsigned *location = &generic_location;
1791 const ir_variable *var;
1792 const glsl_type *type;
1793 bool is_vertex_input = false;
1794
1795 if (matches[i].consumer_var) {
1796 var = matches[i].consumer_var;
1797 type = get_varying_type(var, consumer_stage);
1798 if (consumer_stage == MESA_SHADER_VERTEX)
1799 is_vertex_input = true;
1800 } else {
1801 var = matches[i].producer_var;
1802 type = get_varying_type(var, producer_stage);
1803 }
1804
1805 if (var->data.patch)
1806 location = &generic_patch_location;
1807
1808 /* Advance to the next slot if this varying has a different packing
1809 * class than the previous one, and we're not already on a slot
1810 * boundary.
1811 *
1812 * Also advance to the next slot if packing is disabled. This makes sure
1813 * we don't assign two varyings the same location, which is possible
1814 * because we still pack individual arrays, records and matrices even
1815 * when packing is disabled. Note we don't advance to the next slot if
1816 * we can pack varyings together that are only used for transform
1817 * feedback.
1818 */
1819 if (var->data.must_be_shader_input ||
1820 (this->disable_varying_packing &&
1821 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1822 (previous_packing_class != this->matches[i].packing_class) ||
1823 (this->matches[i].packing_order == PACKING_ORDER_VEC3 &&
1824 dont_pack_vec3)) {
1825 *location = ALIGN(*location, 4);
1826 }
1827
1828 previous_var_xfb_only = var->data.is_xfb_only;
1829 previous_packing_class = this->matches[i].packing_class;
1830
1831 /* The number of components taken up by this variable. For vertex shader
1832 * inputs, we use the number of slots * 4, as they have different
1833 * counting rules.
1834 */
1835 unsigned num_components = is_vertex_input ?
1836 type->count_attribute_slots(is_vertex_input) * 4 :
1837 this->matches[i].num_components;
1838
1839 /* The last slot for this variable, inclusive. */
1840 unsigned slot_end = *location + num_components - 1;
1841
1842 /* FIXME: We could be smarter in the below code and loop back over
1843 * trying to fill any locations that we skipped because we couldn't pack
1844 * the varying between varyings with explicit locations. For now just let the user
1845 * hit the linking error if we run out of room and suggest they use
1846 * explicit locations.
1847 */
1848 while (slot_end < MAX_VARYING * 4u) {
1849 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1850 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1851
1852 assert(slots > 0);
1853
1854 if ((reserved_slots & slot_mask) == 0) {
1855 break;
1856 }
1857
1858 *location = ALIGN(*location + 1, 4);
1859 slot_end = *location + num_components - 1;
1860 }
1861
1862 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1863 linker_error(prog, "insufficient contiguous locations available for "
1864 "%s it is possible an array or struct could not be "
1865 "packed between varyings with explicit locations. Try "
1866 "using an explicit location for arrays and structs.",
1867 var->name);
1868 }
1869
1870 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1871 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1872 components[j] = 4;
1873 components[slot_end / 4u] = (slot_end & 3) + 1;
1874 }
1875
1876 this->matches[i].generic_location = *location;
1877
1878 *location = slot_end + 1;
1879 }
1880
1881 return (generic_location + 3) / 4;
1882 }
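/* Worked example of the bookkeeping above, using invented numbers purely
 * for illustration: suppose no slot-boundary advance is needed, *location
 * is currently 5 (slot 1, component 1) and the match needs
 * num_components == 2.  Then slot_end = 5 + 2 - 1 = 6, the reserved-slot
 * check only inspects bit 1 of reserved_slots, components[1] becomes
 * (6 & 3) + 1 = 3, generic_location is recorded as 5 and the next match
 * starts looking at *location = 7.
 */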
1883
1884
1885 /**
1886 * Update the producer and consumer shaders to reflect the location
1887 * assignments that were made by varying_matches::assign_locations().
1888 */
1889 void
1890 varying_matches::store_locations() const
1891 {
1892 /* Check if the location needs to be packed with lower_packed_varyings() or if
1893 * we can just use ARB_enhanced_layouts packing.
1894 */
1895 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1896 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1897
1898 for (unsigned i = 0; i < this->num_matches; i++) {
1899 ir_variable *producer_var = this->matches[i].producer_var;
1900 ir_variable *consumer_var = this->matches[i].consumer_var;
1901 unsigned generic_location = this->matches[i].generic_location;
1902 unsigned slot = generic_location / 4;
1903 unsigned offset = generic_location % 4;
1904
1905 if (producer_var) {
1906 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1907 producer_var->data.location_frac = offset;
1908 }
1909
1910 if (consumer_var) {
1911 assert(consumer_var->data.location == -1);
1912 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1913 consumer_var->data.location_frac = offset;
1914 }
1915
1916 /* Find locations suitable for native packing via
1917 * ARB_enhanced_layouts.
1918 */
1919 if (producer_var && consumer_var) {
1920 if (enhanced_layouts_enabled) {
1921 const glsl_type *type =
1922 get_varying_type(producer_var, producer_stage);
1923 if (type->is_array() || type->is_matrix() || type->is_record() ||
1924 type->is_double()) {
1925 unsigned comp_slots = type->component_slots() + offset;
1926 unsigned slots = comp_slots / 4;
1927 if (comp_slots % 4)
1928 slots += 1;
1929
1930 for (unsigned j = 0; j < slots; j++) {
1931 pack_loc[slot + j] = true;
1932 }
1933 } else if (offset + type->vector_elements > 4) {
1934 pack_loc[slot] = true;
1935 pack_loc[slot + 1] = true;
1936 } else {
1937 loc_type[slot][offset] = type;
1938 }
1939 }
1940 }
1941 }
1942
1943 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1944 * suitable.
1945 */
1946 if (enhanced_layouts_enabled) {
1947 for (unsigned i = 0; i < this->num_matches; i++) {
1948 ir_variable *producer_var = this->matches[i].producer_var;
1949 ir_variable *consumer_var = this->matches[i].consumer_var;
1950 unsigned generic_location = this->matches[i].generic_location;
1951 unsigned slot = generic_location / 4;
1952
1953 if (pack_loc[slot] || !producer_var || !consumer_var)
1954 continue;
1955
1956 const glsl_type *type =
1957 get_varying_type(producer_var, producer_stage);
1958 bool type_match = true;
1959 for (unsigned j = 0; j < 4; j++) {
1960 if (loc_type[slot][j]) {
1961 if (type->base_type != loc_type[slot][j]->base_type)
1962 type_match = false;
1963 }
1964 }
1965
1966 if (type_match) {
1967 producer_var->data.explicit_location = 1;
1968 consumer_var->data.explicit_location = 1;
1969 producer_var->data.explicit_component = 1;
1970 consumer_var->data.explicit_component = 1;
1971 }
1972 }
1973 }
1974 }
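/* Small worked example of the split above (numbers invented for
 * illustration): generic_location == 9 yields slot = 9 / 4 = 2 and
 * offset = 9 % 4 = 1, so both sides of the match are stored at
 * VARYING_SLOT_VAR2 with location_frac == 1.  Whether that slot also gets
 * flagged in pack_loc[] depends on the producer type: arrays, matrices,
 * records, doubles, or a vector that would spill past component 3 all
 * force the lower_packed_varyings() path instead of ARB_enhanced_layouts.
 */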
1975
1976
1977 /**
1978 * Compute the "packing class" of the given varying. This is an unsigned
1979 * integer with the property that two variables in the same packing class can
1980 * be safely packed into the same vec4.
1981 */
1982 unsigned
1983 varying_matches::compute_packing_class(const ir_variable *var)
1984 {
1985 /* Without help from the back-end, there is no way to pack together
1986 * variables with different interpolation types, because
1987 * lower_packed_varyings must choose exactly one interpolation type for
1988 * each packed varying it creates.
1989 *
1990 * However, we can safely pack together floats, ints, and uints, because:
1991 *
1992 * - varyings of base type "int" and "uint" must use the "flat"
1993 * interpolation type, which can only occur in GLSL 1.30 and above.
1994 *
1995 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1996 * can store flat floats as ints without losing any information (using
1997 * the ir_unop_bitcast_* opcodes).
1998 *
1999 * Therefore, the packing class depends only on the interpolation type.
2000 */
2001 const unsigned interp = var->is_interpolation_flat()
2002 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
2003
2004 assert(interp < (1 << 3));
2005
2006 const unsigned packing_class = (interp << 0) |
2007 (var->data.centroid << 3) |
2008 (var->data.sample << 4) |
2009 (var->data.patch << 5) |
2010 (var->data.must_be_shader_input << 6);
2011
2012 return packing_class;
2013 }
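/* Example encoding, with the enum value of INTERP_MODE_FLAT assumed to be
 * 1 purely so the arithmetic below is concrete (the real value does not
 * matter, only that it fits in 3 bits): a flat varying that is also marked
 * must_be_shader_input, with no centroid/sample/patch qualifiers, gets
 *
 *    packing_class = (1 << 0) | (0 << 3) | (0 << 4) | (0 << 5) | (1 << 6)
 *                  = 0x41
 *
 * and will only share a vec4 with varyings whose bits all agree.
 */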
2014
2015
2016 /**
2017 * Compute the "packing order" of the given varying. This is a sort key we
2018 * use to determine when to attempt to pack the given varying relative to
2019 * other varyings in the same packing class.
2020 */
2021 varying_matches::packing_order_enum
2022 varying_matches::compute_packing_order(const ir_variable *var)
2023 {
2024 const glsl_type *element_type = var->type;
2025
2026 while (element_type->is_array()) {
2027 element_type = element_type->fields.array;
2028 }
2029
2030 switch (element_type->component_slots() % 4) {
2031 case 1: return PACKING_ORDER_SCALAR;
2032 case 2: return PACKING_ORDER_VEC2;
2033 case 3: return PACKING_ORDER_VEC3;
2034 case 0: return PACKING_ORDER_VEC4;
2035 default:
2036 assert(!"Unexpected value of vector_elements");
2037 return PACKING_ORDER_VEC4;
2038 }
2039 }
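/* A few hand-worked inputs for the switch above (illustrative only):
 * "vec3" has 3 component slots, 3 % 4 == 3 -> PACKING_ORDER_VEC3;
 * "float[8]" strips down to a float element, 1 % 4 == 1 ->
 * PACKING_ORDER_SCALAR; "mat2" occupies 4 component slots, 4 % 4 == 0 ->
 * PACKING_ORDER_VEC4.
 */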
2040
2041
2042 /**
2043 * Comparison function passed to qsort() to sort varyings by packing_class and
2044 * then by packing_order.
2045 */
2046 int
2047 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
2048 {
2049 const match *x = (const match *) x_generic;
2050 const match *y = (const match *) y_generic;
2051
2052 if (x->packing_class != y->packing_class)
2053 return x->packing_class - y->packing_class;
2054 return x->packing_order - y->packing_order;
2055 }
2056
2057
2058 /**
2059 * Comparison function passed to qsort() to sort varyings used only by
2060 * transform feedback when packing of other varyings is disabled.
2061 */
2062 int
2063 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
2064 {
2065 const match *x = (const match *) x_generic;
2066
2067 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
2068 return match_comparator(x_generic, y_generic);
2069
2070 /* FIXME: When the comparator returns 0 it means the elements being
2071 * compared are equivalent. However the qsort documentation says:
2072 *
2073 * "The order of equivalent elements is undefined."
2074 *
2075 * In practice the sort ends up reversing the order of the varyings which
2076 * means locations are also assigned in this reversed order and happens to
2077 * be what we want. This is also what's happening in
2078 * varying_matches::match_comparator().
2079 */
2080 return 0;
2081 }
2082
2083
2084 /**
2085 * Is the given variable a varying variable to be counted against the
2086 * limit in ctx->Const.MaxVarying?
2087 * This includes variables such as texcoords, colors and generic
2088 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
2089 */
2090 static bool
2091 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
2092 {
2093 /* Only fragment shaders will take a varying variable as an input */
2094 if (stage == MESA_SHADER_FRAGMENT &&
2095 var->data.mode == ir_var_shader_in) {
2096 switch (var->data.location) {
2097 case VARYING_SLOT_POS:
2098 case VARYING_SLOT_FACE:
2099 case VARYING_SLOT_PNTC:
2100 return false;
2101 default:
2102 return true;
2103 }
2104 }
2105 return false;
2106 }
2107
2108
2109 /**
2110 * Visitor class that generates tfeedback_candidate structs describing all
2111 * possible targets of transform feedback.
2112 *
2113 * tfeedback_candidate structs are stored in the hash table
2114 * tfeedback_candidates, which is passed to the constructor. This hash table
2115 * maps varying names to instances of the tfeedback_candidate struct.
2116 */
2117 class tfeedback_candidate_generator : public program_resource_visitor
2118 {
2119 public:
2120 tfeedback_candidate_generator(void *mem_ctx,
2121 hash_table *tfeedback_candidates)
2122 : mem_ctx(mem_ctx),
2123 tfeedback_candidates(tfeedback_candidates),
2124 toplevel_var(NULL),
2125 varying_floats(0)
2126 {
2127 }
2128
2129 void process(ir_variable *var)
2130 {
2131 /* All named varying interface blocks should be flattened by now */
2132 assert(!var->is_interface_instance());
2133
2134 this->toplevel_var = var;
2135 this->varying_floats = 0;
2136 program_resource_visitor::process(var, false);
2137 }
2138
2139 private:
2140 virtual void visit_field(const glsl_type *type, const char *name,
2141 bool /* row_major */,
2142 const glsl_type * /* record_type */,
2143 const enum glsl_interface_packing,
2144 bool /* last_field */)
2145 {
2146 assert(!type->without_array()->is_record());
2147 assert(!type->without_array()->is_interface());
2148
2149 tfeedback_candidate *candidate
2150 = rzalloc(this->mem_ctx, tfeedback_candidate);
2151 candidate->toplevel_var = this->toplevel_var;
2152 candidate->type = type;
2153 candidate->offset = this->varying_floats;
2154 _mesa_hash_table_insert(this->tfeedback_candidates,
2155 ralloc_strdup(this->mem_ctx, name),
2156 candidate);
2157 this->varying_floats += type->component_slots();
2158 }
2159
2160 /**
2161 * Memory context used to allocate hash table keys and values.
2162 */
2163 void * const mem_ctx;
2164
2165 /**
2166 * Hash table in which tfeedback_candidate objects should be stored.
2167 */
2168 hash_table * const tfeedback_candidates;
2169
2170 /**
2171 * Pointer to the toplevel variable that is being traversed.
2172 */
2173 ir_variable *toplevel_var;
2174
2175 /**
2176 * Total number of varying floats that have been visited so far. This is
2177 * used to determine the offset to each varying within the toplevel
2178 * variable.
2179 */
2180 unsigned varying_floats;
2181 };
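/* Sketch of what the visitor above produces for a hypothetical producer
 * output of type struct { vec4 color; float fog; } named "s" (names
 * invented for illustration): process() flattens the struct and inserts
 * two tfeedback_candidate entries keyed "s.color" (offset 0, 4 floats)
 * and "s.fog" (offset 4, 1 float), both with toplevel_var pointing back
 * at the ir_variable for "s".
 */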
2182
2183
2184 namespace linker {
2185
2186 void
2187 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
2188 hash_table *consumer_inputs,
2189 hash_table *consumer_interface_inputs,
2190 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2191 {
2192 memset(consumer_inputs_with_locations,
2193 0,
2194 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
2195
2196 foreach_in_list(ir_instruction, node, ir) {
2197 ir_variable *const input_var = node->as_variable();
2198
2199 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
2200 /* All interface blocks should have been lowered by this point */
2201 assert(!input_var->type->is_interface());
2202
2203 if (input_var->data.explicit_location) {
2204 /* assign_varying_locations only cares about finding the
2205 * ir_variable at the start of a contiguous location block.
2206 *
2207 * - For !producer, consumer_inputs_with_locations isn't used.
2208 *
2209 * - For !consumer, consumer_inputs_with_locations is empty.
2210 *
2211 * For consumer && producer, if you were trying to set some
2212 * ir_variable to the middle of a location block on the other side
2213 * of producer/consumer, cross_validate_outputs_to_inputs() should
2214 * be link-erroring due to either type mismatch or location
2215 * overlaps. If the variables do match up, then they've got a
2216 * matching data.location and you only looked at
2217 * consumer_inputs_with_locations[var->data.location], not any
2218 * following entries for the array/structure.
2219 */
2220 consumer_inputs_with_locations[input_var->data.location] =
2221 input_var;
2222 } else if (input_var->get_interface_type() != NULL) {
2223 char *const iface_field_name =
2224 ralloc_asprintf(mem_ctx, "%s.%s",
2225 input_var->get_interface_type()->without_array()->name,
2226 input_var->name);
2227 _mesa_hash_table_insert(consumer_interface_inputs,
2228 iface_field_name, input_var);
2229 } else {
2230 _mesa_hash_table_insert(consumer_inputs,
2231 ralloc_strdup(mem_ctx, input_var->name),
2232 input_var);
2233 }
2234 }
2235 }
2236 }
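/* Hedged example of the three lookup structures filled in above, using
 * invented declarations (exact lowered names depend on the earlier
 * interface-block lowering passes):
 *
 *    layout(location = 3) in vec4 a;  -> consumer_inputs_with_locations[
 *                                          VARYING_SLOT_VAR0 + 3]
 *    in Blk { vec4 b; };              -> consumer_interface_inputs["Blk.b"]
 *    in vec4 c;                       -> consumer_inputs["c"]
 *
 * get_matching_input() below rebuilds the same keys from the producer side
 * to find the matching input.
 */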
2237
2238 /**
2239 * Find a variable from the consumer that "matches" the specified variable
2240 *
2241 * This function only finds inputs with names that match. There is no
2242 * validation (here) that the types, etc. are compatible.
2243 */
2244 ir_variable *
2245 get_matching_input(void *mem_ctx,
2246 const ir_variable *output_var,
2247 hash_table *consumer_inputs,
2248 hash_table *consumer_interface_inputs,
2249 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
2250 {
2251 ir_variable *input_var;
2252
2253 if (output_var->data.explicit_location) {
2254 input_var = consumer_inputs_with_locations[output_var->data.location];
2255 } else if (output_var->get_interface_type() != NULL) {
2256 char *const iface_field_name =
2257 ralloc_asprintf(mem_ctx, "%s.%s",
2258 output_var->get_interface_type()->without_array()->name,
2259 output_var->name);
2260 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
2261 input_var = entry ? (ir_variable *) entry->data : NULL;
2262 } else {
2263 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
2264 input_var = entry ? (ir_variable *) entry->data : NULL;
2265 }
2266
2267 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2268 ? NULL : input_var;
2269 }
2270
2271 }
2272
2273 static int
2274 io_variable_cmp(const void *_a, const void *_b)
2275 {
2276 const ir_variable *const a = *(const ir_variable **) _a;
2277 const ir_variable *const b = *(const ir_variable **) _b;
2278
2279 if (a->data.explicit_location && b->data.explicit_location)
2280 return b->data.location - a->data.location;
2281
2282 if (a->data.explicit_location && !b->data.explicit_location)
2283 return 1;
2284
2285 if (!a->data.explicit_location && b->data.explicit_location)
2286 return -1;
2287
2288 return -strcmp(a->name, b->name);
2289 }
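/* Hand-worked ordering example for the comparator above (made-up
 * variables): given outputs a (location 7), b (location 5), c and d with
 * no explicit locations, qsort() produces { d, c, a, b }: variables
 * without explicit locations sort first in reverse-alphabetical order,
 * then explicitly located ones in descending location order.  Because
 * canonicalize_shader_io() then push_head()s each element in turn, the
 * resulting IR order is the reverse, { b, a, c, d }, which is the
 * canonical order referred to below.
 */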
2290
2291 /**
2292 * Sort the shader IO variables into canonical order
2293 */
2294 static void
2295 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2296 {
2297 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2298 unsigned num_variables = 0;
2299
2300 foreach_in_list(ir_instruction, node, ir) {
2301 ir_variable *const var = node->as_variable();
2302
2303 if (var == NULL || var->data.mode != io_mode)
2304 continue;
2305
2306 /* If we have already encountered more I/O variables than could
2307 * successfully link, bail.
2308 */
2309 if (num_variables == ARRAY_SIZE(var_table))
2310 return;
2311
2312 var_table[num_variables++] = var;
2313 }
2314
2315 if (num_variables == 0)
2316 return;
2317
2318 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2319 * we're going to push the variables on to the IR list as a stack, so we
2320 * want the last variable (in canonical order) to be first in the list.
2321 */
2322 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2323
2324 /* Remove the variable from its current location in the IR, and put it at
2325 * the front.
2326 */
2327 for (unsigned i = 0; i < num_variables; i++) {
2328 var_table[i]->remove();
2329 ir->push_head(var_table[i]);
2330 }
2331 }
2332
2333 /**
2334 * Generate a bitfield map of the explicit locations for shader varyings.
2335 *
2336 * Note: For Tessellation shaders we are sitting right on the limits of the
2337 * 64 bit map. Per-vertex and per-patch both have separate location domains
2338 * with a max of MAX_VARYING.
2339 */
2340 static uint64_t
2341 reserved_varying_slot(struct gl_linked_shader *stage,
2342 ir_variable_mode io_mode)
2343 {
2344 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2345 /* Avoid an overflow of the returned value */
2346 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2347
2348 uint64_t slots = 0;
2349 int var_slot;
2350
2351 if (!stage)
2352 return slots;
2353
2354 foreach_in_list(ir_instruction, node, stage->ir) {
2355 ir_variable *const var = node->as_variable();
2356
2357 if (var == NULL || var->data.mode != io_mode ||
2358 !var->data.explicit_location ||
2359 var->data.location < VARYING_SLOT_VAR0)
2360 continue;
2361
2362 var_slot = var->data.location - VARYING_SLOT_VAR0;
2363
2364 unsigned num_elements = get_varying_type(var, stage->Stage)
2365 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2366 stage->Stage == MESA_SHADER_VERTEX);
2367 for (unsigned i = 0; i < num_elements; i++) {
2368 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2369 slots |= UINT64_C(1) << var_slot;
2370 var_slot += 1;
2371 }
2372 }
2373
2374 return slots;
2375 }
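/* Example of the bitmap built above, with an invented declaration: an
 * output "layout(location = 2) out vec4 v[3]" lands at
 * VARYING_SLOT_VAR0 + 2, so var_slot == 2 and count_attribute_slots()
 * returns 3, setting bits 2, 3 and 4 and contributing 0x1c to the
 * reserved-slot mask.
 */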
2376
2377
2378 /**
2379 * Assign locations for all variables that are produced in one pipeline stage
2380 * (the "producer") and consumed in the next stage (the "consumer").
2381 *
2382 * Variables produced by the producer may also be consumed by transform
2383 * feedback.
2384 *
2385 * \param num_tfeedback_decls is the number of declarations indicating
2386 * variables that may be consumed by transform feedback.
2387 *
2388 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2389 * representing the result of parsing the strings passed to
2390 * glTransformFeedbackVaryings(). assign_location() will be called for
2391 * each of these objects that matches one of the outputs of the
2392 * producer.
2393 *
2394 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2395 * be NULL. In this case, varying locations are assigned solely based on the
2396 * requirements of transform feedback.
2397 */
2398 static bool
2399 assign_varying_locations(struct gl_context *ctx,
2400 void *mem_ctx,
2401 struct gl_shader_program *prog,
2402 gl_linked_shader *producer,
2403 gl_linked_shader *consumer,
2404 unsigned num_tfeedback_decls,
2405 tfeedback_decl *tfeedback_decls,
2406 const uint64_t reserved_slots)
2407 {
2408 /* Tessellation shaders treat inputs and outputs as shared memory and can
2409 * access inputs and outputs of other invocations.
2410 * Therefore, they can't be lowered to temps easily (and definitely not
2411 * efficiently).
2412 */
2413 bool unpackable_tess =
2414 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2415 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2416 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2417
2418 /* Transform feedback code assumes varying arrays are packed, so if the
2419 * driver has disabled varying packing, make sure to at least enable
2420 * packing required by transform feedback.
2421 */
2422 bool xfb_enabled =
2423 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2424
2425 /* Disable packing on outward facing interfaces for SSO because in ES we
2426 * need to retain the unpacked varying information for draw time
2427 * validation.
2428 *
2429 * Packing is still enabled on individual arrays, structs, and matrices as
2430 * these are required by the transform feedback code and it is still safe
2431 * to do so. We also enable packing when a varying is only used for
2432 * transform feedback and it's not an SSO.
2433 */
2434 bool disable_varying_packing =
2435 ctx->Const.DisableVaryingPacking || unpackable_tess;
2436 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2437 disable_varying_packing = true;
2438
2439 varying_matches matches(disable_varying_packing, xfb_enabled,
2440 ctx->Extensions.ARB_enhanced_layouts,
2441 producer ? producer->Stage : MESA_SHADER_NONE,
2442 consumer ? consumer->Stage : MESA_SHADER_NONE);
2443 hash_table *tfeedback_candidates =
2444 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2445 _mesa_key_string_equal);
2446 hash_table *consumer_inputs =
2447 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2448 _mesa_key_string_equal);
2449 hash_table *consumer_interface_inputs =
2450 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2451 _mesa_key_string_equal);
2452 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2453 NULL,
2454 };
2455
2456 unsigned consumer_vertices = 0;
2457 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2458 consumer_vertices = prog->Geom.VerticesIn;
2459
2460 /* Operate in a total of four passes.
2461 *
2462 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2463 * that inputs / outputs of separable shaders will be assigned
2464 * predictable locations regardless of the order in which declarations
2465 * appeared in the shader source.
2466 *
2467 * 2. Assign locations for any matching inputs and outputs.
2468 *
2469 * 3. Mark output variables in the producer that do not have locations as
2470 * not being outputs. This lets the optimizer eliminate them.
2471 *
2472 * 4. Mark input variables in the consumer that do not have locations as
2473 * not being inputs. This lets the optimizer eliminate them.
2474 */
2475 if (consumer)
2476 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2477
2478 if (producer)
2479 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2480
2481 if (consumer)
2482 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2483 consumer_inputs,
2484 consumer_interface_inputs,
2485 consumer_inputs_with_locations);
2486
2487 if (producer) {
2488 foreach_in_list(ir_instruction, node, producer->ir) {
2489 ir_variable *const output_var = node->as_variable();
2490
2491 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2492 continue;
2493
2494 /* Only geometry shaders can use non-zero streams */
2495 assert(output_var->data.stream == 0 ||
2496 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2497 producer->Stage == MESA_SHADER_GEOMETRY));
2498
2499 if (num_tfeedback_decls > 0) {
2500 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2501 g.process(output_var);
2502 }
2503
2504 ir_variable *const input_var =
2505 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2506 consumer_interface_inputs,
2507 consumer_inputs_with_locations);
2508
2509 /* If a matching input variable was found, add this output (and the
2510 * input) to the set. If this is a separable program and there is no
2511 * consumer stage, add the output.
2512 *
2513 * Always add TCS outputs. They are shared by all invocations
2514 * within a patch and can be used as shared memory.
2515 */
2516 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2517 producer->Stage == MESA_SHADER_TESS_CTRL) {
2518 matches.record(output_var, input_var);
2519 }
2520
2521 /* Only stream 0 outputs can be consumed in the next stage */
2522 if (input_var && output_var->data.stream != 0) {
2523 linker_error(prog, "output %s is assigned to stream=%d but "
2524 "is linked to an input, which requires stream=0",
2525 output_var->name, output_var->data.stream);
2526 return false;
2527 }
2528 }
2529 } else {
2530 /* If there's no producer stage, then this must be a separable program.
2531 * For example, we may have a program that has just a fragment shader.
2532 * Later this program will be used with some arbitrary vertex (or
2533 * geometry) shader program. This means that locations must be assigned
2534 * for all the inputs.
2535 */
2536 foreach_in_list(ir_instruction, node, consumer->ir) {
2537 ir_variable *const input_var = node->as_variable();
2538 if (input_var && input_var->data.mode == ir_var_shader_in) {
2539 matches.record(NULL, input_var);
2540 }
2541 }
2542 }
2543
2544 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2545 if (!tfeedback_decls[i].is_varying())
2546 continue;
2547
2548 const tfeedback_candidate *matched_candidate
2549 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2550
2551 if (matched_candidate == NULL) {
2552 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2553 return false;
2554 }
2555
2556 /* Mark xfb varyings as always active */
2557 matched_candidate->toplevel_var->data.always_active_io = 1;
2558
2559 /* Mark any corresponding inputs as always active also. We must do this
2560 * because we have a NIR pass that lowers vectors to scalars and another
2561 * that removes unused varyings.
2562 * We don't split varyings marked as always active because there is no
2563 * point in doing so. This means we need to mark both sides of the
2564 * interface as always active, otherwise we will have a mismatch and
2565 * start removing things we shouldn't.
2566 */
2567 ir_variable *const input_var =
2568 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2569 consumer_inputs,
2570 consumer_interface_inputs,
2571 consumer_inputs_with_locations);
2572 if (input_var)
2573 input_var->data.always_active_io = 1;
2574
2575 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2576 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2577 matches.record(matched_candidate->toplevel_var, NULL);
2578 }
2579 }
2580
2581 _mesa_hash_table_destroy(consumer_inputs, NULL);
2582 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2583
2584 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2585 const unsigned slots_used = matches.assign_locations(
2586 prog, components, reserved_slots);
2587 matches.store_locations();
2588
2589 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2590 if (tfeedback_decls[i].is_varying()) {
2591 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2592 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2593 return false;
2594 }
2595 }
2596 }
2597 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2598
2599 if (consumer && producer) {
2600 foreach_in_list(ir_instruction, node, consumer->ir) {
2601 ir_variable *const var = node->as_variable();
2602
2603 if (var && var->data.mode == ir_var_shader_in &&
2604 var->data.is_unmatched_generic_inout) {
2605 if (!prog->IsES && prog->data->Version <= 120) {
2606 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2607 *
2608 * Only those varying variables used (i.e. read) in
2609 * the fragment shader executable must be written to
2610 * by the vertex shader executable; declaring
2611 * superfluous varying variables in a vertex shader is
2612 * permissible.
2613 *
2614 * We interpret this text as meaning that the VS must
2615 * write the variable for the FS to read it. See
2616 * "glsl1-varying read but not written" in piglit.
2617 */
2618 linker_error(prog, "%s shader varying %s not written "
2619 "by %s shader\n.",
2620 _mesa_shader_stage_to_string(consumer->Stage),
2621 var->name,
2622 _mesa_shader_stage_to_string(producer->Stage));
2623 } else {
2624 linker_warning(prog, "%s shader varying %s not written "
2625 "by %s shader\n.",
2626 _mesa_shader_stage_to_string(consumer->Stage),
2627 var->name,
2628 _mesa_shader_stage_to_string(producer->Stage));
2629 }
2630 }
2631 }
2632
2633 /* Now that validation is done it's safe to remove unused varyings. As
2634 * we have both a producer and consumer, it's safe to remove unused
2635 * varyings even if the program is an SSO because the stages are being
2636 * linked together, i.e. we have a multi-stage SSO.
2637 */
2638 remove_unused_shader_inputs_and_outputs(false, producer,
2639 ir_var_shader_out);
2640 remove_unused_shader_inputs_and_outputs(false, consumer,
2641 ir_var_shader_in);
2642 }
2643
2644 if (producer) {
2645 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2646 0, producer, disable_varying_packing,
2647 xfb_enabled);
2648 }
2649
2650 if (consumer) {
2651 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2652 consumer_vertices, consumer,
2653 disable_varying_packing, xfb_enabled);
2654 }
2655
2656 return true;
2657 }
2658
2659 static bool
2660 check_against_output_limit(struct gl_context *ctx,
2661 struct gl_shader_program *prog,
2662 gl_linked_shader *producer,
2663 unsigned num_explicit_locations)
2664 {
2665 unsigned output_vectors = num_explicit_locations;
2666
2667 foreach_in_list(ir_instruction, node, producer->ir) {
2668 ir_variable *const var = node->as_variable();
2669
2670 if (var && !var->data.explicit_location &&
2671 var->data.mode == ir_var_shader_out &&
2672 var_counts_against_varying_limit(producer->Stage, var)) {
2673 /* outputs for fragment shader can't be doubles */
2674 output_vectors += var->type->count_attribute_slots(false);
2675 }
2676 }
2677
2678 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2679 unsigned max_output_components =
2680 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2681
2682 const unsigned output_components = output_vectors * 4;
2683 if (output_components > max_output_components) {
2684 if (ctx->API == API_OPENGLES2 || prog->IsES)
2685 linker_error(prog, "%s shader uses too many output vectors "
2686 "(%u > %u)\n",
2687 _mesa_shader_stage_to_string(producer->Stage),
2688 output_vectors,
2689 max_output_components / 4);
2690 else
2691 linker_error(prog, "%s shader uses too many output components "
2692 "(%u > %u)\n",
2693 _mesa_shader_stage_to_string(producer->Stage),
2694 output_components,
2695 max_output_components);
2696
2697 return false;
2698 }
2699
2700 return true;
2701 }
2702
2703 static bool
2704 check_against_input_limit(struct gl_context *ctx,
2705 struct gl_shader_program *prog,
2706 gl_linked_shader *consumer,
2707 unsigned num_explicit_locations)
2708 {
2709 unsigned input_vectors = num_explicit_locations;
2710
2711 foreach_in_list(ir_instruction, node, consumer->ir) {
2712 ir_variable *const var = node->as_variable();
2713
2714 if (var && !var->data.explicit_location &&
2715 var->data.mode == ir_var_shader_in &&
2716 var_counts_against_varying_limit(consumer->Stage, var)) {
2717 /* vertex inputs aren't counted against the varying limit */
2718 input_vectors += var->type->count_attribute_slots(false);
2719 }
2720 }
2721
2722 assert(consumer->Stage != MESA_SHADER_VERTEX);
2723 unsigned max_input_components =
2724 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2725
2726 const unsigned input_components = input_vectors * 4;
2727 if (input_components > max_input_components) {
2728 if (ctx->API == API_OPENGLES2 || prog->IsES)
2729 linker_error(prog, "%s shader uses too many input vectors "
2730 "(%u > %u)\n",
2731 _mesa_shader_stage_to_string(consumer->Stage),
2732 input_vectors,
2733 max_input_components / 4);
2734 else
2735 linker_error(prog, "%s shader uses too many input components "
2736 "(%u > %u)\n",
2737 _mesa_shader_stage_to_string(consumer->Stage),
2738 input_components,
2739 max_input_components);
2740
2741 return false;
2742 }
2743
2744 return true;
2745 }
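/* Quick arithmetic sketch for the two limit checks above (invented
 * numbers): with MaxInputComponents == 64 the consumer may use at most
 * 64 / 4 == 16 input vectors.  Fourteen generic vec4 inputs on top of
 * three explicitly located slots give input_components == 17 * 4 == 68,
 * which exceeds 64 and fails the link, reported as "too many input
 * vectors" on ES and "too many input components" on desktop GL.
 */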
2746
2747 bool
2748 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2749 struct gl_context *ctx, void *mem_ctx)
2750 {
2751 bool has_xfb_qualifiers = false;
2752 unsigned num_tfeedback_decls = 0;
2753 char **varying_names = NULL;
2754 tfeedback_decl *tfeedback_decls = NULL;
2755
2756 /* From the ARB_enhanced_layouts spec:
2757 *
2758 * "If the shader used to record output variables for transform feedback
2759 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2760 * qualifiers, the values specified by TransformFeedbackVaryings are
2761 * ignored, and the set of variables captured for transform feedback is
2762 * instead derived from the specified layout qualifiers."
2763 */
2764 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2765 /* Find last stage before fragment shader */
2766 if (prog->_LinkedShaders[i]) {
2767 has_xfb_qualifiers =
2768 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2769 prog, &num_tfeedback_decls,
2770 &varying_names);
2771 break;
2772 }
2773 }
2774
2775 if (!has_xfb_qualifiers) {
2776 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2777 varying_names = prog->TransformFeedback.VaryingNames;
2778 }
2779
2780 if (num_tfeedback_decls != 0) {
2781 /* From GL_EXT_transform_feedback:
2782 * A program will fail to link if:
2783 *
2784 * * the <count> specified by TransformFeedbackVaryingsEXT is
2785 * non-zero, but the program object has no vertex or geometry
2786 * shader;
2787 */
2788 if (first >= MESA_SHADER_FRAGMENT) {
2789 linker_error(prog, "Transform feedback varyings specified, but "
2790 "no vertex, tessellation, or geometry shader is "
2791 "present.\n");
2792 return false;
2793 }
2794
2795 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2796 num_tfeedback_decls);
2797 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2798 varying_names, tfeedback_decls))
2799 return false;
2800 }
2801
2802 /* If there is no fragment shader we still need to set up transform feedback.
2803 *
2804 * For SSO we also need to assign output locations. We assign them here
2805 * because we need to do it for both single stage programs and multi stage
2806 * programs.
2807 */
2808 if (last < MESA_SHADER_FRAGMENT &&
2809 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2810 const uint64_t reserved_out_slots =
2811 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2812 if (!assign_varying_locations(ctx, mem_ctx, prog,
2813 prog->_LinkedShaders[last], NULL,
2814 num_tfeedback_decls, tfeedback_decls,
2815 reserved_out_slots))
2816 return false;
2817 }
2818
2819 if (last <= MESA_SHADER_FRAGMENT) {
2820 /* Remove unused varyings from the first/last stage unless SSO */
2821 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2822 prog->_LinkedShaders[first],
2823 ir_var_shader_in);
2824 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2825 prog->_LinkedShaders[last],
2826 ir_var_shader_out);
2827
2828 /* If the program is made up of only a single stage */
2829 if (first == last) {
2830 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2831
2832 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2833 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2834 tfeedback_decls);
2835
2836 if (prog->SeparateShader) {
2837 const uint64_t reserved_slots =
2838 reserved_varying_slot(sh, ir_var_shader_in);
2839
2840 /* Assign input locations for SSO; output locations are already
2841 * assigned.
2842 */
2843 if (!assign_varying_locations(ctx, mem_ctx, prog,
2844 NULL /* producer */,
2845 sh /* consumer */,
2846 0 /* num_tfeedback_decls */,
2847 NULL /* tfeedback_decls */,
2848 reserved_slots))
2849 return false;
2850 }
2851 } else {
2852 /* Linking the stages in the opposite order (from fragment to vertex)
2853 * ensures that inter-shader outputs written to in an earlier stage
2854 * are eliminated if they are (transitively) not used in a later
2855 * stage.
2856 */
2857 int next = last;
2858 for (int i = next - 1; i >= 0; i--) {
2859 if (prog->_LinkedShaders[i] == NULL && i != 0)
2860 continue;
2861
2862 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2863 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2864
2865 const uint64_t reserved_out_slots =
2866 reserved_varying_slot(sh_i, ir_var_shader_out);
2867 const uint64_t reserved_in_slots =
2868 reserved_varying_slot(sh_next, ir_var_shader_in);
2869
2870 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2871 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2872 tfeedback_decls);
2873
2874 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2875 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2876 tfeedback_decls,
2877 reserved_out_slots | reserved_in_slots))
2878 return false;
2879
2880 /* This must be done after all dead varyings are eliminated. */
2881 if (sh_i != NULL) {
2882 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2883 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2884 return false;
2885 }
2886 }
2887
2888 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2889 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2890 return false;
2891
2892 next = i;
2893 }
2894 }
2895 }
2896
2897 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2898 has_xfb_qualifiers))
2899 return false;
2900
2901 return true;
2902 }