glsl: mark xfb varyings as always active
[mesa.git] / src / compiler / glsl / link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/mtypes.h"
33 #include "glsl_symbol_table.h"
34 #include "glsl_parser_extras.h"
35 #include "ir_optimization.h"
36 #include "linker.h"
37 #include "link_varyings.h"
38 #include "main/macros.h"
39 #include "util/hash_table.h"
40 #include "program.h"
41
42
43 /**
44 * Get the varying type stripped of the outermost array if we're processing
45 * a stage whose varyings are arrays indexed by a vertex number (such as
46 * geometry shader inputs).
47 */
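/* Added annotation, illustrative example: a geometry shader input declared
 * in GLSL as `in vec4 tex_coord[];` is an array of vec4 (one element per
 * vertex of the input primitive); this helper returns vec4 so later code
 * works with the per-vertex element type.
 */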
48 static const glsl_type *
49 get_varying_type(const ir_variable *var, gl_shader_stage stage)
50 {
51 const glsl_type *type = var->type;
52
53 if (!var->data.patch &&
54 ((var->data.mode == ir_var_shader_out &&
55 stage == MESA_SHADER_TESS_CTRL) ||
56 (var->data.mode == ir_var_shader_in &&
57 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
58 stage == MESA_SHADER_GEOMETRY)))) {
59 assert(type->is_array());
60 type = type->fields.array;
61 }
62
63 return type;
64 }
65
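/* Added annotation: create_xfb_varying_names() recursively flattens an
 * aggregate output into the individual names used for transform feedback.
 * Illustrative example: with `struct S { vec4 a; float b; };`, an output
 * `out S s[2];` captured with xfb_offset expands to "s[0].a", "s[0].b",
 * "s[1].a" and "s[1].b", matching the count returned by
 * glsl_type::varying_count().
 */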
66 static void
67 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
68 size_t name_length, unsigned *count,
69 const char *ifc_member_name,
70 const glsl_type *ifc_member_t, char ***varying_names)
71 {
72 if (t->is_interface()) {
73 size_t new_length = name_length;
74
75 assert(ifc_member_name && ifc_member_t);
76 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
77
78 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
79 NULL, NULL, varying_names);
80 } else if (t->is_record()) {
81 for (unsigned i = 0; i < t->length; i++) {
82 const char *field = t->fields.structure[i].name;
83 size_t new_length = name_length;
84
85 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
86
87 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
88 new_length, count, NULL, NULL,
89 varying_names);
90 }
91 } else if (t->without_array()->is_record() ||
92 t->without_array()->is_interface() ||
93 (t->is_array() && t->fields.array->is_array())) {
94 for (unsigned i = 0; i < t->length; i++) {
95 size_t new_length = name_length;
96
97 /* Append the subscript to the current variable name */
98 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
99
100 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
101 count, ifc_member_name, ifc_member_t,
102 varying_names);
103 }
104 } else {
105 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
106 }
107 }
108
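/* Added annotation: process_xfb_layout_qualifiers() scans the stage's
 * outputs for ARB_enhanced_layouts transform feedback qualifiers, e.g.
 * (illustrative GLSL)
 *
 *    layout(xfb_buffer = 0, xfb_offset = 0) out vec4 pos;
 *
 * It reports whether any xfb_* qualifier (including a global xfb_stride
 * collected into TransformFeedback.BufferStride) puts the program in
 * transform feedback capturing mode and, for outputs with an explicit
 * xfb_offset, builds the flattened list of varying names.
 */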
109 static bool
110 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
111 struct gl_shader_program *prog,
112 unsigned *num_tfeedback_decls,
113 char ***varying_names)
114 {
115 bool has_xfb_qualifiers = false;
116
117 /* We still need to enable transform feedback mode even if xfb_stride is
118 * only applied to a global out. Also we don't bother to propagate
119 * xfb_stride to interface block members, so this will catch that case too.
120 */
121 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
122 if (prog->TransformFeedback.BufferStride[j]) {
123 has_xfb_qualifiers = true;
124 break;
125 }
126 }
127
128 foreach_in_list(ir_instruction, node, sh->ir) {
129 ir_variable *var = node->as_variable();
130 if (!var || var->data.mode != ir_var_shader_out)
131 continue;
132
133 /* From the ARB_enhanced_layouts spec:
134 *
135 * "Any shader making any static use (after preprocessing) of any of
136 * these *xfb_* qualifiers will cause the shader to be in a
137 * transform feedback capturing mode and hence responsible for
138 * describing the transform feedback setup. This mode will capture
139 * any output selected by *xfb_offset*, directly or indirectly, to
140 * a transform feedback buffer."
141 */
142 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
143 has_xfb_qualifiers = true;
144 }
145
146 if (var->data.explicit_xfb_offset) {
147 *num_tfeedback_decls += var->type->varying_count();
148 has_xfb_qualifiers = true;
149 }
150 }
151
152 if (*num_tfeedback_decls == 0)
153 return has_xfb_qualifiers;
154
155 unsigned i = 0;
156 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
157 foreach_in_list(ir_instruction, node, sh->ir) {
158 ir_variable *var = node->as_variable();
159 if (!var || var->data.mode != ir_var_shader_out)
160 continue;
161
162 if (var->data.explicit_xfb_offset) {
163 char *name;
164 const glsl_type *type, *member_type;
165
166 if (var->data.from_named_ifc_block) {
167 type = var->get_interface_type();
168 /* Find the member type before it was altered by lowering */
169 member_type =
170 type->fields.structure[type->field_index(var->name)].type;
171 name = ralloc_strdup(NULL, type->without_array()->name);
172 } else {
173 type = var->type;
174 member_type = NULL;
175 name = ralloc_strdup(NULL, var->name);
176 }
177 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
178 var->name, member_type, varying_names);
179 ralloc_free(name);
180 }
181 }
182
183 assert(i == *num_tfeedback_decls);
184 return has_xfb_qualifiers;
185 }
186
187 /**
188 * Validate the types and qualifiers of an output from one stage against the
189 * matching input to another stage.
190 */
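/* Added annotation, illustrative example: a vertex shader `out vec4 color;`
 * is matched against a geometry shader `in vec4 color[];`. The consumer's
 * per-vertex array level is stripped (extra_array_level below) before the
 * types are compared.
 */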
191 static void
192 cross_validate_types_and_qualifiers(struct gl_shader_program *prog,
193 const ir_variable *input,
194 const ir_variable *output,
195 gl_shader_stage consumer_stage,
196 gl_shader_stage producer_stage)
197 {
198 /* Check that the types match between stages.
199 */
200 const glsl_type *type_to_match = input->type;
201
202 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
203 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
204 consumer_stage != MESA_SHADER_FRAGMENT) ||
205 consumer_stage == MESA_SHADER_GEOMETRY;
206 if (extra_array_level) {
207 assert(type_to_match->is_array());
208 type_to_match = type_to_match->fields.array;
209 }
210
211 if (type_to_match != output->type) {
212 /* There is a bit of a special case for gl_TexCoord. This
213 * built-in is unsized by default. Applications that variably
214 * access it must redeclare it with a size. There is some
215 * language in the GLSL spec that implies the fragment shader
216 * and vertex shader do not have to agree on this size. Other
217 * drivers behave this way, and one or two applications seem to
218 * rely on it.
219 *
220 * Neither declaration needs to be modified here because the array
221 * sizes are fixed later when update_array_sizes is called.
222 *
223 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
224 *
225 * "Unlike user-defined varying variables, the built-in
226 * varying variables don't have a strict one-to-one
227 * correspondence between the vertex language and the
228 * fragment language."
229 */
230 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
231 linker_error(prog,
232 "%s shader output `%s' declared as type `%s', "
233 "but %s shader input declared as type `%s'\n",
234 _mesa_shader_stage_to_string(producer_stage),
235 output->name,
236 output->type->name,
237 _mesa_shader_stage_to_string(consumer_stage),
238 input->type->name);
239 return;
240 }
241 }
242
243 /* Check that all of the qualifiers match between stages.
244 */
245
246 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
247 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
248 * conformance test suite does not verify that the qualifiers must match.
249 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
250 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
251 */
252 if (false /* always skip the centroid check */ &&
253 prog->data->Version < (prog->IsES ? 310 : 430) &&
254 input->data.centroid != output->data.centroid) {
255 linker_error(prog,
256 "%s shader output `%s' %s centroid qualifier, "
257 "but %s shader input %s centroid qualifier\n",
258 _mesa_shader_stage_to_string(producer_stage),
259 output->name,
260 (output->data.centroid) ? "has" : "lacks",
261 _mesa_shader_stage_to_string(consumer_stage),
262 (input->data.centroid) ? "has" : "lacks");
263 return;
264 }
265
266 if (input->data.sample != output->data.sample) {
267 linker_error(prog,
268 "%s shader output `%s' %s sample qualifier, "
269 "but %s shader input %s sample qualifier\n",
270 _mesa_shader_stage_to_string(producer_stage),
271 output->name,
272 (output->data.sample) ? "has" : "lacks",
273 _mesa_shader_stage_to_string(consumer_stage),
274 (input->data.sample) ? "has" : "lacks");
275 return;
276 }
277
278 if (input->data.patch != output->data.patch) {
279 linker_error(prog,
280 "%s shader output `%s' %s patch qualifier, "
281 "but %s shader input %s patch qualifier\n",
282 _mesa_shader_stage_to_string(producer_stage),
283 output->name,
284 (output->data.patch) ? "has" : "lacks",
285 _mesa_shader_stage_to_string(consumer_stage),
286 (input->data.patch) ? "has" : "lacks");
287 return;
288 }
289
290 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
291 *
292 * "As only outputs need be declared with invariant, an output from
293 * one shader stage will still match an input of a subsequent stage
294 * without the input being declared as invariant."
295 *
296 * while GLSL 4.20 says:
297 *
298 * "For variables leaving one shader and coming into another shader,
299 * the invariant keyword has to be used in both shaders, or a link
300 * error will result."
301 *
302 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
303 *
304 * "The invariance of varyings that are declared in both the vertex
305 * and fragment shaders must match."
306 */
307 if (input->data.invariant != output->data.invariant &&
308 prog->data->Version < (prog->IsES ? 300 : 430)) {
309 linker_error(prog,
310 "%s shader output `%s' %s invariant qualifier, "
311 "but %s shader input %s invariant qualifier\n",
312 _mesa_shader_stage_to_string(producer_stage),
313 output->name,
314 (output->data.invariant) ? "has" : "lacks",
315 _mesa_shader_stage_to_string(consumer_stage),
316 (input->data.invariant) ? "has" : "lacks");
317 return;
318 }
319
320 /* GLSL >= 4.40 removes text requiring interpolation qualifiers
321 * to match across stages; they must only match within the same stage.
322 *
323 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
324 *
325 * "It is a link-time error if, within the same stage, the interpolation
326 * qualifiers of variables of the same name do not match."
327 *
328 */
329 if (input->data.interpolation != output->data.interpolation &&
330 prog->data->Version < 440) {
331 linker_error(prog,
332 "%s shader output `%s' specifies %s "
333 "interpolation qualifier, "
334 "but %s shader input specifies %s "
335 "interpolation qualifier\n",
336 _mesa_shader_stage_to_string(producer_stage),
337 output->name,
338 interpolation_string(output->data.interpolation),
339 _mesa_shader_stage_to_string(consumer_stage),
340 interpolation_string(input->data.interpolation));
341 return;
342 }
343 }
344
345 /**
346 * Validate front and back color outputs against single color input
347 */
348 static void
349 cross_validate_front_and_back_color(struct gl_shader_program *prog,
350 const ir_variable *input,
351 const ir_variable *front_color,
352 const ir_variable *back_color,
353 gl_shader_stage consumer_stage,
354 gl_shader_stage producer_stage)
355 {
356 if (front_color != NULL && front_color->data.assigned)
357 cross_validate_types_and_qualifiers(prog, input, front_color,
358 consumer_stage, producer_stage);
359
360 if (back_color != NULL && back_color->data.assigned)
361 cross_validate_types_and_qualifiers(prog, input, back_color,
362 consumer_stage, producer_stage);
363 }
364
365 /**
366 * Validate that outputs from one stage match inputs of another
367 */
368 void
369 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
370 gl_linked_shader *producer,
371 gl_linked_shader *consumer)
372 {
373 glsl_symbol_table parameters;
374 ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
375 { {NULL, NULL} };
376
377 /* Find all shader outputs in the "producer" stage.
378 */
379 foreach_in_list(ir_instruction, node, producer->ir) {
380 ir_variable *const var = node->as_variable();
381
382 if (var == NULL || var->data.mode != ir_var_shader_out)
383 continue;
384
385 if (!var->data.explicit_location
386 || var->data.location < VARYING_SLOT_VAR0)
387 parameters.add_variable(var);
388 else {
389 /* User-defined varyings with explicit locations are handled
390 * differently because they do not need to have matching names.
391 */
392 const glsl_type *type = get_varying_type(var, producer->Stage);
393 unsigned num_elements = type->count_attribute_slots(false);
394 unsigned idx = var->data.location - VARYING_SLOT_VAR0;
395 unsigned slot_limit = idx + num_elements;
396 unsigned last_comp;
397
398 if (type->without_array()->is_record()) {
399 /* The component qualifier can't be used on structs so just treat
400 * all component slots as used.
401 */
402 last_comp = 4;
403 } else {
404 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
405 last_comp = var->data.location_frac +
406 type->without_array()->vector_elements * dmul;
407 }
408
409 while (idx < slot_limit) {
410 unsigned i = var->data.location_frac;
411 while (i < last_comp) {
412 if (explicit_locations[idx][i] != NULL) {
413 linker_error(prog,
414 "%s shader has multiple outputs explicitly "
415 "assigned to location %d and component %d\n",
416 _mesa_shader_stage_to_string(producer->Stage),
417 idx, var->data.location_frac);
418 return;
419 }
420
421 /* Make sure all components at this location have the same type.
422 */
423 for (unsigned j = 0; j < 4; j++) {
424 if (explicit_locations[idx][j] &&
425 (explicit_locations[idx][j]->type->without_array()
426 ->base_type != type->without_array()->base_type)) {
427 linker_error(prog,
428 "Varyings sharing the same location must "
429 "have the same underlying numerical type. "
430 "Location %u component %u\n", idx,
431 var->data.location_frac);
432 return;
433 }
434 }
435
436 explicit_locations[idx][i] = var;
437 i++;
438
439 /* We need to do some special handling for doubles as dvec3 and
440 * dvec4 consume two consecutive locations. We don't need to
441 * worry about components beginning at anything other than 0 as
442 * the spec does not allow this for dvec3 and dvec4.
443 */
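/* Added annotation, worked example: a dvec4 output at location L has
 * dmul == 2, so last_comp == 8 and count_attribute_slots() == 2; the
 * wrap-around below marks components 0..3 of both location L and L+1.
 */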
444 if (i == 4 && last_comp > 4) {
445 last_comp = last_comp - 4;
446 /* Bump location index and reset the component index */
447 idx++;
448 i = 0;
449 }
450 }
451 idx++;
452 }
453 }
454 }
455
456
457 /* Find all shader inputs in the "consumer" stage. Any variables that have
458 * matching outputs already in the symbol table must have the same type and
459 * qualifiers.
460 *
461 * Exception: if the consumer is the geometry shader, then the inputs
462 * should be arrays and the type of the array element should match the type
463 * of the corresponding producer output.
464 */
465 foreach_in_list(ir_instruction, node, consumer->ir) {
466 ir_variable *const input = node->as_variable();
467
468 if (input == NULL || input->data.mode != ir_var_shader_in)
469 continue;
470
471 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
472 const ir_variable *const front_color =
473 parameters.get_variable("gl_FrontColor");
474
475 const ir_variable *const back_color =
476 parameters.get_variable("gl_BackColor");
477
478 cross_validate_front_and_back_color(prog, input,
479 front_color, back_color,
480 consumer->Stage, producer->Stage);
481 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
482 const ir_variable *const front_color =
483 parameters.get_variable("gl_FrontSecondaryColor");
484
485 const ir_variable *const back_color =
486 parameters.get_variable("gl_BackSecondaryColor");
487
488 cross_validate_front_and_back_color(prog, input,
489 front_color, back_color,
490 consumer->Stage, producer->Stage);
491 } else {
492 /* The rules for connecting inputs and outputs change in the presence
493 * of explicit locations. In this case, we no longer care about the
494 * names of the variables. Instead, we care only about the
495 * explicitly assigned location.
496 */
497 ir_variable *output = NULL;
498 if (input->data.explicit_location
499 && input->data.location >= VARYING_SLOT_VAR0) {
500
501 const glsl_type *type = get_varying_type(input, consumer->Stage);
502 unsigned num_elements = type->count_attribute_slots(false);
503 unsigned idx = input->data.location - VARYING_SLOT_VAR0;
504 unsigned slot_limit = idx + num_elements;
505
506 while (idx < slot_limit) {
507 output = explicit_locations[idx][input->data.location_frac];
508
509 if (output == NULL ||
510 input->data.location != output->data.location) {
511 linker_error(prog,
512 "%s shader input `%s' with explicit location "
513 "has no matching output\n",
514 _mesa_shader_stage_to_string(consumer->Stage),
515 input->name);
516 break;
517 }
518 idx++;
519 }
520 } else {
521 output = parameters.get_variable(input->name);
522 }
523
524 if (output != NULL) {
525 /* Interface blocks have their own validation elsewhere so don't
526 * try validating them here.
527 */
528 if (!(input->get_interface_type() &&
529 output->get_interface_type()))
530 cross_validate_types_and_qualifiers(prog, input, output,
531 consumer->Stage,
532 producer->Stage);
533 } else {
534 /* Check for input vars with unmatched output vars in prev stage
535 * taking into account that interface blocks could have a matching
536 * output but with a different name, so we ignore them.
537 */
538 assert(!input->data.assigned);
539 if (input->data.used && !input->get_interface_type() &&
540 !input->data.explicit_location && !prog->SeparateShader)
541 linker_error(prog,
542 "%s shader input `%s' "
543 "has no matching output in the previous stage\n",
544 _mesa_shader_stage_to_string(consumer->Stage),
545 input->name);
546 }
547 }
548 }
549 }
550
551 /**
552 * Demote shader inputs and outputs that are not used in other stages, and
553 * remove them via dead code elimination.
554 */
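/* Added annotation, illustrative example: a vertex shader `out vec4 dbg;`
 * that no later stage reads and that is not captured by transform feedback
 * (is_xfb_only unset) is demoted to ir_var_auto below and then removed by
 * the dead code pass.
 */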
555 static void
556 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
557 gl_linked_shader *sh,
558 enum ir_variable_mode mode)
559 {
560 if (is_separate_shader_object)
561 return;
562
563 foreach_in_list(ir_instruction, node, sh->ir) {
564 ir_variable *const var = node->as_variable();
565
566 if (var == NULL || var->data.mode != int(mode))
567 continue;
568
569 /* A shader 'in' or 'out' variable is only really an input or output if
570 * its value is used by other shader stages. This will cause the
571 * variable to have a location assigned.
572 */
573 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
574 assert(var->data.mode != ir_var_temporary);
575
576 /* Assign zeros to demoted inputs to allow more optimizations. */
577 if (var->data.mode == ir_var_shader_in && !var->constant_value)
578 var->constant_value = ir_constant::zero(var, var->type);
579
580 var->data.mode = ir_var_auto;
581 }
582 }
583
584 /* Eliminate code that is now dead due to unused inputs/outputs being
585 * demoted.
586 */
587 while (do_dead_code(sh->ir, false))
588 ;
589
590 }
591
592 /**
593 * Initialize this object based on a string that was passed to
594 * glTransformFeedbackVaryings.
595 *
596 * If the input is mal-formed, this call still succeeds, but it sets
597 * this->var_name to a mal-formed input, so tfeedback_decl::find_candidate()
598 * will fail to find any matching variable.
599 */
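/* Added annotation, illustrative examples of the parsing below:
 *   "foo"           -> var_name "foo", is_subscripted = false
 *   "foo[2]"        -> var_name "foo", array_subscript = 2, is_subscripted = true
 *   "gl_NextBuffer" -> next_buffer_separator (with ARB_transform_feedback3)
 */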
600 void
601 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
602 const char *input)
603 {
604 /* We don't have to be pedantic about what is a valid GLSL variable name,
605 * because any variable with an invalid name can't exist in the IR anyway.
606 */
607
608 this->location = -1;
609 this->orig_name = input;
610 this->lowered_builtin_array_variable = none;
611 this->skip_components = 0;
612 this->next_buffer_separator = false;
613 this->matched_candidate = NULL;
614 this->stream_id = 0;
615 this->buffer = 0;
616 this->offset = 0;
617
618 if (ctx->Extensions.ARB_transform_feedback3) {
619 /* Parse gl_NextBuffer. */
620 if (strcmp(input, "gl_NextBuffer") == 0) {
621 this->next_buffer_separator = true;
622 return;
623 }
624
625 /* Parse gl_SkipComponents. */
626 if (strcmp(input, "gl_SkipComponents1") == 0)
627 this->skip_components = 1;
628 else if (strcmp(input, "gl_SkipComponents2") == 0)
629 this->skip_components = 2;
630 else if (strcmp(input, "gl_SkipComponents3") == 0)
631 this->skip_components = 3;
632 else if (strcmp(input, "gl_SkipComponents4") == 0)
633 this->skip_components = 4;
634
635 if (this->skip_components)
636 return;
637 }
638
639 /* Parse a declaration. */
640 const char *base_name_end;
641 long subscript = parse_program_resource_name(input, &base_name_end);
642 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
643 if (this->var_name == NULL) {
644 _mesa_error_no_memory(__func__);
645 return;
646 }
647
648 if (subscript >= 0) {
649 this->array_subscript = subscript;
650 this->is_subscripted = true;
651 } else {
652 this->is_subscripted = false;
653 }
654
655 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
656 * class must behave specially to account for the fact that gl_ClipDistance
657 * is converted from a float[8] to a vec4[2].
658 */
659 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
660 strcmp(this->var_name, "gl_ClipDistance") == 0) {
661 this->lowered_builtin_array_variable = clip_distance;
662 }
663 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
664 strcmp(this->var_name, "gl_CullDistance") == 0) {
665 this->lowered_builtin_array_variable = cull_distance;
666 }
667
668 if (ctx->Const.LowerTessLevel &&
669 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
670 this->lowered_builtin_array_variable = tess_level_outer;
671 if (ctx->Const.LowerTessLevel &&
672 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
673 this->lowered_builtin_array_variable = tess_level_inner;
674 }
675
676
677 /**
678 * Determine whether two tfeedback_decl objects refer to the same variable and
679 * array index (if applicable).
680 */
681 bool
682 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
683 {
684 assert(x.is_varying() && y.is_varying());
685
686 if (strcmp(x.var_name, y.var_name) != 0)
687 return false;
688 if (x.is_subscripted != y.is_subscripted)
689 return false;
690 if (x.is_subscripted && x.array_subscript != y.array_subscript)
691 return false;
692 return true;
693 }
694
695
696 /**
697 * Assign a location and stream ID for this tfeedback_decl object based on the
698 * transform feedback candidate found by find_candidate.
699 *
700 * If an error occurs, the error is reported through linker_error() and false
701 * is returned.
702 */
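/* Added annotation: fine_location counts float-sized components, i.e.
 * 4 * location + component (plus any struct member offset). Illustrative
 * example: a candidate at location L, component 2, offset 0 gets
 * fine_location 4*L + 2, which the divide/modulo at the end of this
 * function turns back into location = L, location_frac = 2.
 */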
703 bool
704 tfeedback_decl::assign_location(struct gl_context *ctx,
705 struct gl_shader_program *prog)
706 {
707 assert(this->is_varying());
708
709 unsigned fine_location
710 = this->matched_candidate->toplevel_var->data.location * 4
711 + this->matched_candidate->toplevel_var->data.location_frac
712 + this->matched_candidate->offset;
713 const unsigned dmul =
714 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
715
716 if (this->matched_candidate->type->is_array()) {
717 /* Array variable */
718 const unsigned matrix_cols =
719 this->matched_candidate->type->fields.array->matrix_columns;
720 const unsigned vector_elements =
721 this->matched_candidate->type->fields.array->vector_elements;
722 unsigned actual_array_size;
723 switch (this->lowered_builtin_array_variable) {
724 case clip_distance:
725 actual_array_size = prog->last_vert_prog ?
726 prog->last_vert_prog->info.clip_distance_array_size : 0;
727 break;
728 case cull_distance:
729 actual_array_size = prog->last_vert_prog ?
730 prog->last_vert_prog->info.cull_distance_array_size : 0;
731 break;
732 case tess_level_outer:
733 actual_array_size = 4;
734 break;
735 case tess_level_inner:
736 actual_array_size = 2;
737 break;
738 case none:
739 default:
740 actual_array_size = this->matched_candidate->type->array_size();
741 break;
742 }
743
744 if (this->is_subscripted) {
745 /* Check array bounds. */
746 if (this->array_subscript >= actual_array_size) {
747 linker_error(prog, "Transform feedback varying %s has index "
748 "%i, but the array size is %u.",
749 this->orig_name, this->array_subscript,
750 actual_array_size);
751 return false;
752 }
753 unsigned array_elem_size = this->lowered_builtin_array_variable ?
754 1 : vector_elements * matrix_cols * dmul;
755 fine_location += array_elem_size * this->array_subscript;
756 this->size = 1;
757 } else {
758 this->size = actual_array_size;
759 }
760 this->vector_elements = vector_elements;
761 this->matrix_columns = matrix_cols;
762 if (this->lowered_builtin_array_variable)
763 this->type = GL_FLOAT;
764 else
765 this->type = this->matched_candidate->type->fields.array->gl_type;
766 } else {
767 /* Regular variable (scalar, vector, or matrix) */
768 if (this->is_subscripted) {
769 linker_error(prog, "Transform feedback varying %s requested, "
770 "but %s is not an array.",
771 this->orig_name, this->var_name);
772 return false;
773 }
774 this->size = 1;
775 this->vector_elements = this->matched_candidate->type->vector_elements;
776 this->matrix_columns = this->matched_candidate->type->matrix_columns;
777 this->type = this->matched_candidate->type->gl_type;
778 }
779 this->location = fine_location / 4;
780 this->location_frac = fine_location % 4;
781
782 /* From GL_EXT_transform_feedback:
783 * A program will fail to link if:
784 *
785 * * the total number of components to capture in any varying
786 * variable in <varyings> is greater than the constant
787 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
788 * buffer mode is SEPARATE_ATTRIBS_EXT;
789 */
790 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
791 this->num_components() >
792 ctx->Const.MaxTransformFeedbackSeparateComponents) {
793 linker_error(prog, "Transform feedback varying %s exceeds "
794 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
795 this->orig_name);
796 return false;
797 }
798
799 /* Only transform feedback varyings can be assigned to non-zero streams,
800 * so assign the stream id here.
801 */
802 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
803
804 unsigned array_offset = this->array_subscript * 4 * dmul;
805 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
806 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
807 this->offset = this->matched_candidate->toplevel_var->data.offset +
808 array_offset + struct_offset;
809
810 return true;
811 }
812
813
814 unsigned
815 tfeedback_decl::get_num_outputs() const
816 {
817 if (!this->is_varying()) {
818 return 0;
819 }
820 return (this->num_components() + this->location_frac + 3)/4;
821 }
822
823
824 /**
825 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
826 *
827 * If an error occurs, the error is reported through linker_error() and false
828 * is returned.
829 */
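/* Added annotation, illustrative example (interleaved mode, no xfb_*
 * qualifiers, both outputs actually written): capturing a vec3 followed by
 * a float into one buffer yields DstOffset 0 and 3 (in floats),
 * Varyings[].Offset 0 and 12 (in bytes), and a final Buffers[].Stride of
 * 4 floats.
 */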
830 bool
831 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
832 struct gl_transform_feedback_info *info,
833 unsigned buffer, unsigned buffer_index,
834 const unsigned max_outputs, bool *explicit_stride,
835 bool has_xfb_qualifiers) const
836 {
837 unsigned xfb_offset = 0;
838 unsigned size = this->size;
839 /* Handle gl_SkipComponents. */
840 if (this->skip_components) {
841 info->Buffers[buffer].Stride += this->skip_components;
842 size = this->skip_components;
843 goto store_varying;
844 }
845
846 if (this->next_buffer_separator) {
847 size = 0;
848 goto store_varying;
849 }
850
851 if (has_xfb_qualifiers) {
852 xfb_offset = this->offset / 4;
853 } else {
854 xfb_offset = info->Buffers[buffer].Stride;
855 }
856 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
857
858 {
859 unsigned location = this->location;
860 unsigned location_frac = this->location_frac;
861 unsigned num_components = this->num_components();
862 while (num_components > 0) {
863 unsigned output_size = MIN2(num_components, 4 - location_frac);
864 assert((info->NumOutputs == 0 && max_outputs == 0) ||
865 info->NumOutputs < max_outputs);
866
867 /* From the ARB_enhanced_layouts spec:
868 *
869 * "If such a block member or variable is not written during a shader
870 * invocation, the buffer contents at the assigned offset will be
871 * undefined. Even if there are no static writes to a variable or
872 * member that is assigned a transform feedback offset, the space is
873 * still allocated in the buffer and still affects the stride."
874 */
875 if (this->is_varying_written()) {
876 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
877 info->Outputs[info->NumOutputs].OutputRegister = location;
878 info->Outputs[info->NumOutputs].NumComponents = output_size;
879 info->Outputs[info->NumOutputs].StreamId = stream_id;
880 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
881 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
882 ++info->NumOutputs;
883 }
884 info->Buffers[buffer].Stream = this->stream_id;
885 xfb_offset += output_size;
886
887 num_components -= output_size;
888 location++;
889 location_frac = 0;
890 }
891 }
892
893 if (explicit_stride && explicit_stride[buffer]) {
894 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
895 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
896 "multiple of 8 as its applied to a type that is or "
897 "contains a double.",
898 info->Buffers[buffer].Stride * 4);
899 return false;
900 }
901
902 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
903 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
904 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
905 "buffer (%d)", xfb_offset * 4,
906 info->Buffers[buffer].Stride * 4, buffer);
907 return false;
908 }
909 } else {
910 info->Buffers[buffer].Stride = xfb_offset;
911 }
912
913 /* From GL_EXT_transform_feedback:
914 * A program will fail to link if:
915 *
916 * * the total number of components to capture is greater than
917 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
918 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
919 *
920 * From GL_ARB_enhanced_layouts:
921 *
922 * "The resulting stride (implicit or explicit) must be less than or
923 * equal to the implementation-dependent constant
924 * gl_MaxTransformFeedbackInterleavedComponents."
925 */
926 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
927 has_xfb_qualifiers) &&
928 info->Buffers[buffer].Stride >
929 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
930 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
931 "limit has been exceeded.");
932 return false;
933 }
934
935 store_varying:
936 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
937 this->orig_name);
938 info->Varyings[info->NumVarying].Type = this->type;
939 info->Varyings[info->NumVarying].Size = size;
940 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
941 info->NumVarying++;
942 info->Buffers[buffer].NumVaryings++;
943
944 return true;
945 }
946
947
948 const tfeedback_candidate *
949 tfeedback_decl::find_candidate(gl_shader_program *prog,
950 hash_table *tfeedback_candidates)
951 {
952 const char *name = this->var_name;
953 switch (this->lowered_builtin_array_variable) {
954 case none:
955 name = this->var_name;
956 break;
957 case clip_distance:
958 name = "gl_ClipDistanceMESA";
959 break;
960 case cull_distance:
961 name = "gl_CullDistanceMESA";
962 break;
963 case tess_level_outer:
964 name = "gl_TessLevelOuterMESA";
965 break;
966 case tess_level_inner:
967 name = "gl_TessLevelInnerMESA";
968 break;
969 }
970 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
971
972 this->matched_candidate = entry ?
973 (const tfeedback_candidate *) entry->data : NULL;
974
975 if (!this->matched_candidate) {
976 /* From GL_EXT_transform_feedback:
977 * A program will fail to link if:
978 *
979 * * any variable name specified in the <varyings> array is not
980 * declared as an output in the geometry shader (if present) or
981 * the vertex shader (if no geometry shader is present);
982 */
983 linker_error(prog, "Transform feedback varying %s undeclared.",
984 this->orig_name);
985 }
986
987 return this->matched_candidate;
988 }
989
990
991 /**
992 * Parse all the transform feedback declarations that were passed to
993 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
994 *
995 * If an error occurs, the error is reported through linker_error() and false
996 * is returned.
997 */
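/* Added annotation, illustrative API usage that feeds this function:
 *
 *    const char *names[] = { "outPos", "gl_NextBuffer", "outColor[1]" };
 *    glTransformFeedbackVaryings(prog, 3, names, GL_INTERLEAVED_ATTRIBS);
 *
 * produces three decls: a plain varying, a buffer separator, and a
 * subscripted varying.
 */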
998 static bool
999 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1000 const void *mem_ctx, unsigned num_names,
1001 char **varying_names, tfeedback_decl *decls)
1002 {
1003 for (unsigned i = 0; i < num_names; ++i) {
1004 decls[i].init(ctx, mem_ctx, varying_names[i]);
1005
1006 if (!decls[i].is_varying())
1007 continue;
1008
1009 /* From GL_EXT_transform_feedback:
1010 * A program will fail to link if:
1011 *
1012 * * any two entries in the <varyings> array specify the same varying
1013 * variable;
1014 *
1015 * We interpret this to mean "any two entries in the <varyings> array
1016 * specify the same varying variable and array index", since transform
1017 * feedback of arrays would be useless otherwise.
1018 */
1019 for (unsigned j = 0; j < i; ++j) {
1020 if (!decls[j].is_varying())
1021 continue;
1022
1023 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1024 linker_error(prog, "Transform feedback varying %s specified "
1025 "more than once.", varying_names[i]);
1026 return false;
1027 }
1028 }
1029 }
1030 return true;
1031 }
1032
1033
1034 static int
1035 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1036 {
1037 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1038 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1039
1040 if (x->get_buffer() != y->get_buffer())
1041 return x->get_buffer() - y->get_buffer();
1042 return x->get_offset() - y->get_offset();
1043 }
1044
1045 /**
1046 * Store transform feedback location assignments into
1047 * prog->sh.LinkedTransformFeedback based on the data stored in
1048 * tfeedback_decls.
1049 *
1050 * If an error occurs, the error is reported through linker_error() and false
1051 * is returned.
1052 */
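/* Added annotation: in GL_SEPARATE_ATTRIBS mode every varying below gets its
 * own buffer index, while in interleaved mode the buffer index only advances
 * at a gl_NextBuffer separator or (with xfb qualifiers) when the declared
 * xfb_buffer changes.
 */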
1053 static bool
1054 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1055 unsigned num_tfeedback_decls,
1056 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1057 {
1058 if (!prog->last_vert_prog)
1059 return true;
1060
1061 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1062 * tracking the number of buffers doesn't overflow.
1063 */
1064 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1065
1066 bool separate_attribs_mode =
1067 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1068
1069 struct gl_program *xfb_prog = prog->last_vert_prog;
1070 xfb_prog->sh.LinkedTransformFeedback =
1071 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1072
1073 /* The xfb_offset qualifier does not have to be used in increasing order,
1074 * however some drivers expect to receive the list of transform feedback
1075 * declarations in order, so sort it now for convenience.
1076 */
1077 if (has_xfb_qualifiers)
1078 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1079 cmp_xfb_offset);
1080
1081 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1082 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1083 num_tfeedback_decls);
1084
1085 unsigned num_outputs = 0;
1086 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1087 if (tfeedback_decls[i].is_varying_written())
1088 num_outputs += tfeedback_decls[i].get_num_outputs();
1089 }
1090
1091 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1092 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1093 num_outputs);
1094
1095 unsigned num_buffers = 0;
1096 unsigned buffers = 0;
1097
1098 if (!has_xfb_qualifiers && separate_attribs_mode) {
1099 /* GL_SEPARATE_ATTRIBS */
1100 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1101 if (!tfeedback_decls[i].store(ctx, prog,
1102 xfb_prog->sh.LinkedTransformFeedback,
1103 num_buffers, num_buffers, num_outputs,
1104 NULL, has_xfb_qualifiers))
1105 return false;
1106
1107 buffers |= 1 << num_buffers;
1108 num_buffers++;
1109 }
1110 }
1111 else {
1112 /* GL_INTERLEAVED_ATTRIBS */
1113 int buffer_stream_id = -1;
1114 unsigned buffer =
1115 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1116 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1117
1118 /* Apply any xfb_stride global qualifiers */
1119 if (has_xfb_qualifiers) {
1120 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1121 if (prog->TransformFeedback.BufferStride[j]) {
1122 buffers |= 1 << j;
1123 explicit_stride[j] = true;
1124 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1125 prog->TransformFeedback.BufferStride[j] / 4;
1126 }
1127 }
1128 }
1129
1130 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1131 if (has_xfb_qualifiers &&
1132 buffer != tfeedback_decls[i].get_buffer()) {
1133 /* we have moved to the next buffer so reset stream id */
1134 buffer_stream_id = -1;
1135 num_buffers++;
1136 }
1137
1138 if (tfeedback_decls[i].is_next_buffer_separator()) {
1139 if (!tfeedback_decls[i].store(ctx, prog,
1140 xfb_prog->sh.LinkedTransformFeedback,
1141 buffer, num_buffers, num_outputs,
1142 explicit_stride, has_xfb_qualifiers))
1143 return false;
1144 num_buffers++;
1145 buffer_stream_id = -1;
1146 continue;
1147 } else if (tfeedback_decls[i].is_varying()) {
1148 if (buffer_stream_id == -1) {
1149 /* First varying writing to this buffer: remember its stream */
1150 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1151 } else if (buffer_stream_id !=
1152 (int) tfeedback_decls[i].get_stream_id()) {
1153 /* Varying writes to the same buffer from a different stream */
1154 linker_error(prog,
1155 "Transform feedback can't capture varyings belonging "
1156 "to different vertex streams in a single buffer. "
1157 "Varying %s writes to buffer from stream %u, other "
1158 "varyings in the same buffer write from stream %u.",
1159 tfeedback_decls[i].name(),
1160 tfeedback_decls[i].get_stream_id(),
1161 buffer_stream_id);
1162 return false;
1163 }
1164 }
1165
1166 if (has_xfb_qualifiers) {
1167 buffer = tfeedback_decls[i].get_buffer();
1168 } else {
1169 buffer = num_buffers;
1170 }
1171 buffers |= 1 << buffer;
1172
1173 if (!tfeedback_decls[i].store(ctx, prog,
1174 xfb_prog->sh.LinkedTransformFeedback,
1175 buffer, num_buffers, num_outputs,
1176 explicit_stride, has_xfb_qualifiers))
1177 return false;
1178 }
1179 }
1180
1181 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1182
1183 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1184 return true;
1185 }
1186
1187 namespace {
1188
1189 /**
1190 * Data structure recording the relationship between outputs of one shader
1191 * stage (the "producer") and inputs of another (the "consumer").
1192 */
1193 class varying_matches
1194 {
1195 public:
1196 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1197 bool enhanced_layouts_enabled,
1198 gl_shader_stage producer_stage,
1199 gl_shader_stage consumer_stage);
1200 ~varying_matches();
1201 void record(ir_variable *producer_var, ir_variable *consumer_var);
1202 unsigned assign_locations(struct gl_shader_program *prog,
1203 uint8_t *components,
1204 uint64_t reserved_slots);
1205 void store_locations() const;
1206
1207 private:
1208 bool is_varying_packing_safe(const glsl_type *type,
1209 const ir_variable *var);
1210
1211 /**
1212 * If true, this driver disables varying packing, so all varyings need to
1213 * be aligned on slot boundaries, and take up a number of slots equal to
1214 * their number of matrix columns times their array size.
1215 *
1216 * Packing may also be disabled because our current packing method is not
1217 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1218 * guaranteed to match across stages.
1219 */
1220 const bool disable_varying_packing;
1221
1222 /**
1223 * If true, this driver has transform feedback enabled. The transform
1224 * feedback code requires at least some packing be done even when varying
1225 * packing is disabled; fortunately, where transform feedback requires
1226 * packing it's safe to override the disabled setting. See
1227 * is_varying_packing_safe().
1228 */
1229 const bool xfb_enabled;
1230
1231 const bool enhanced_layouts_enabled;
1232
1233 /**
1234 * Enum representing the order in which varyings are packed within a
1235 * packing class.
1236 *
1237 * Currently we pack vec4's first, then vec2's, then scalar values, then
1238 * vec3's. This order ensures that the only vectors that are at risk of
1239 * having to be "double parked" (split between two adjacent varying slots)
1240 * are the vec3's.
1241 */
1242 enum packing_order_enum {
1243 PACKING_ORDER_VEC4,
1244 PACKING_ORDER_VEC2,
1245 PACKING_ORDER_SCALAR,
1246 PACKING_ORDER_VEC3,
1247 };
1248
1249 static unsigned compute_packing_class(const ir_variable *var);
1250 static packing_order_enum compute_packing_order(const ir_variable *var);
1251 static int match_comparator(const void *x_generic, const void *y_generic);
1252 static int xfb_comparator(const void *x_generic, const void *y_generic);
1253
1254 /**
1255 * Structure recording the relationship between a single producer output
1256 * and a single consumer input.
1257 */
1258 struct match {
1259 /**
1260 * Packing class for this varying, computed by compute_packing_class().
1261 */
1262 unsigned packing_class;
1263
1264 /**
1265 * Packing order for this varying, computed by compute_packing_order().
1266 */
1267 packing_order_enum packing_order;
1268 unsigned num_components;
1269
1270 /**
1271 * The output variable in the producer stage.
1272 */
1273 ir_variable *producer_var;
1274
1275 /**
1276 * The input variable in the consumer stage.
1277 */
1278 ir_variable *consumer_var;
1279
1280 /**
1281 * The location which has been assigned for this varying. This is
1282 * expressed in multiples of a float, with the first generic varying
1283 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1284 * value 0.
1285 */
1286 unsigned generic_location;
1287 } *matches;
1288
1289 /**
1290 * The number of elements in the \c matches array that are currently in
1291 * use.
1292 */
1293 unsigned num_matches;
1294
1295 /**
1296 * The number of elements that were set aside for the \c matches array when
1297 * it was allocated.
1298 */
1299 unsigned matches_capacity;
1300
1301 gl_shader_stage producer_stage;
1302 gl_shader_stage consumer_stage;
1303 };
1304
1305 } /* anonymous namespace */
1306
1307 varying_matches::varying_matches(bool disable_varying_packing,
1308 bool xfb_enabled,
1309 bool enhanced_layouts_enabled,
1310 gl_shader_stage producer_stage,
1311 gl_shader_stage consumer_stage)
1312 : disable_varying_packing(disable_varying_packing),
1313 xfb_enabled(xfb_enabled),
1314 enhanced_layouts_enabled(enhanced_layouts_enabled),
1315 producer_stage(producer_stage),
1316 consumer_stage(consumer_stage)
1317 {
1318 /* Note: this initial capacity is rather arbitrarily chosen to be large
1319 * enough for many cases without wasting an unreasonable amount of space.
1320 * varying_matches::record() will resize the array if there are more than
1321 * this number of varyings.
1322 */
1323 this->matches_capacity = 8;
1324 this->matches = (match *)
1325 malloc(sizeof(*this->matches) * this->matches_capacity);
1326 this->num_matches = 0;
1327 }
1328
1329
1330 varying_matches::~varying_matches()
1331 {
1332 free(this->matches);
1333 }
1334
1335
1336 /**
1337 * Packing is always safe on individual arrays, structures, and matrices. It
1338 * is also safe if the varying is only used for transform feedback.
1339 */
1340 bool
1341 varying_matches::is_varying_packing_safe(const glsl_type *type,
1342 const ir_variable *var)
1343 {
1344 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1345 consumer_stage == MESA_SHADER_TESS_CTRL ||
1346 producer_stage == MESA_SHADER_TESS_CTRL)
1347 return false;
1348
1349 return xfb_enabled && (type->is_array() || type->is_record() ||
1350 type->is_matrix() || var->data.is_xfb_only);
1351 }
1352
1353
1354 /**
1355 * Record the given producer/consumer variable pair in the list of variables
1356 * that should later be assigned locations.
1357 *
1358 * It is permissible for \c consumer_var to be NULL (this happens if a
1359 * variable is output by the producer and consumed by transform feedback, but
1360 * not consumed by the consumer).
1361 *
1362 * If \c producer_var has already been paired up with a consumer_var, or
1363 * producer_var is part of fixed pipeline functionality (and hence already has
1364 * a location assigned), this function has no effect.
1365 *
1366 * Note: as a side effect this function may change the interpolation type of
1367 * \c producer_var, but only when the change couldn't possibly affect
1368 * rendering.
1369 */
1370 void
1371 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1372 {
1373 assert(producer_var != NULL || consumer_var != NULL);
1374
1375 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1376 producer_var->data.explicit_location)) ||
1377 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1378 consumer_var->data.explicit_location))) {
1379 /* Either a location already exists for this variable (since it is part
1380 * of fixed functionality), or it has already been recorded as part of a
1381 * previous match.
1382 */
1383 return;
1384 }
1385
1386 bool needs_flat_qualifier = consumer_var == NULL &&
1387 (producer_var->type->contains_integer() ||
1388 producer_var->type->contains_double());
1389
1390 if (!disable_varying_packing &&
1391 (needs_flat_qualifier ||
1392 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1393 /* Since this varying is not being consumed by the fragment shader, its
1394 * interpolation type cannot possibly affect rendering.
1395 * Also, this variable is non-flat and is (or contains) an integer
1396 * or a double.
1397 * If the consumer stage is unknown, don't modify the interpolation
1398 * type as it could affect rendering later with separate shaders.
1399 *
1400 * lower_packed_varyings requires all integer varyings to be flat,
1401 * regardless of where they appear. We can trivially satisfy that
1402 * requirement by changing the interpolation type to flat here.
1403 */
1404 if (producer_var) {
1405 producer_var->data.centroid = false;
1406 producer_var->data.sample = false;
1407 producer_var->data.interpolation = INTERP_MODE_FLAT;
1408 }
1409
1410 if (consumer_var) {
1411 consumer_var->data.centroid = false;
1412 consumer_var->data.sample = false;
1413 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1414 }
1415 }
1416
1417 if (this->num_matches == this->matches_capacity) {
1418 this->matches_capacity *= 2;
1419 this->matches = (match *)
1420 realloc(this->matches,
1421 sizeof(*this->matches) * this->matches_capacity);
1422 }
1423
1424 /* We must use the consumer to compute the packing class because in GL4.4+
1425 * there is no guarantee interpolation qualifiers will match across stages.
1426 *
1427 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1428 *
1429 * "The type and presence of interpolation qualifiers of variables with
1430 * the same name declared in all linked shaders for the same cross-stage
1431 * interface must match, otherwise the link command will fail.
1432 *
1433 * When comparing an output from one stage to an input of a subsequent
1434 * stage, the input and output don't match if their interpolation
1435 * qualifiers (or lack thereof) are not the same."
1436 *
1437 * This text was also in at least revision 7 of the 4.40 spec but is no
1438 * longer in revision 9 and not in the 4.50 spec.
1439 */
1440 const ir_variable *const var = (consumer_var != NULL)
1441 ? consumer_var : producer_var;
1442 const gl_shader_stage stage = (consumer_var != NULL)
1443 ? consumer_stage : producer_stage;
1444 const glsl_type *type = get_varying_type(var, stage);
1445
1446 if (producer_var && consumer_var &&
1447 consumer_var->data.must_be_shader_input) {
1448 producer_var->data.must_be_shader_input = 1;
1449 }
1450
1451 this->matches[this->num_matches].packing_class
1452 = this->compute_packing_class(var);
1453 this->matches[this->num_matches].packing_order
1454 = this->compute_packing_order(var);
1455 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1456 var->data.must_be_shader_input) {
1457 unsigned slots = type->count_attribute_slots(false);
1458 this->matches[this->num_matches].num_components = slots * 4;
1459 } else {
1460 this->matches[this->num_matches].num_components
1461 = type->component_slots();
1462 }
1463
1464 this->matches[this->num_matches].producer_var = producer_var;
1465 this->matches[this->num_matches].consumer_var = consumer_var;
1466 this->num_matches++;
1467 if (producer_var)
1468 producer_var->data.is_unmatched_generic_inout = 0;
1469 if (consumer_var)
1470 consumer_var->data.is_unmatched_generic_inout = 0;
1471 }
1472
1473
1474 /**
1475 * Choose locations for all of the variable matches that were previously
1476 * passed to varying_matches::record().
1477 */
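/* Added annotation: locations assigned here are measured in float components
 * relative to VARYING_SLOT_VAR0, matching match::generic_location; e.g. a
 * value of 6 means component 2 of the second generic slot. The return value
 * is the number of generic 4-component slots consumed (patch varyings are
 * tracked separately).
 */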
1478 unsigned
1479 varying_matches::assign_locations(struct gl_shader_program *prog,
1480 uint8_t *components,
1481 uint64_t reserved_slots)
1482 {
1483 /* If packing has been disabled then we cannot safely sort the varyings by
1484 * class as it may mean we are using a version of OpenGL where
1485 * interpolation qualifiers are not guaranteed to be matching across
1486 * shaders, sorting in this case could result in mismatching shader
1487 * interfaces.
1488 * When packing is disabled the sort orders varyings used by transform
1489 * feedback first, but also depends on *undefined behaviour* of qsort to
1490 * reverse the order of the varyings. See: xfb_comparator().
1491 */
1492 if (!this->disable_varying_packing) {
1493 /* Sort varying matches into an order that makes them easy to pack. */
1494 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1495 &varying_matches::match_comparator);
1496 } else {
1497 /* Only sort varyings that are only used by transform feedback. */
1498 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1499 &varying_matches::xfb_comparator);
1500 }
1501
1502 unsigned generic_location = 0;
1503 unsigned generic_patch_location = MAX_VARYING*4;
1504 bool previous_var_xfb_only = false;
1505
1506 for (unsigned i = 0; i < this->num_matches; i++) {
1507 unsigned *location = &generic_location;
1508
1509 const ir_variable *var;
1510 const glsl_type *type;
1511 bool is_vertex_input = false;
1512 if (matches[i].consumer_var) {
1513 var = matches[i].consumer_var;
1514 type = get_varying_type(var, consumer_stage);
1515 if (consumer_stage == MESA_SHADER_VERTEX)
1516 is_vertex_input = true;
1517 } else {
1518 var = matches[i].producer_var;
1519 type = get_varying_type(var, producer_stage);
1520 }
1521
1522 if (var->data.patch)
1523 location = &generic_patch_location;
1524
1525 /* Advance to the next slot if this varying has a different packing
1526 * class than the previous one, and we're not already on a slot
1527 * boundary.
1528 *
1529 * Also advance to the next slot if packing is disabled. This makes sure
1530 * we don't assign varyings the same locations which is possible
1531 * because we still pack individual arrays, records and matrices even
1532 * when packing is disabled. Note we don't advance to the next slot if
1533 * we can pack varyings together that are only used for transform
1534 * feedback.
1535 */
1536 if (var->data.must_be_shader_input ||
1537 (this->disable_varying_packing &&
1538 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1539 (i > 0 && this->matches[i - 1].packing_class
1540 != this->matches[i].packing_class )) {
1541 *location = ALIGN(*location, 4);
1542 }
1543
1544 previous_var_xfb_only = var->data.is_xfb_only;
1545
1546 /* The number of components taken up by this variable. For vertex shader
1547 * inputs, we use the number of slots * 4, as they have different
1548 * counting rules.
1549 */
1550 unsigned num_components = is_vertex_input ?
1551 type->count_attribute_slots(is_vertex_input) * 4 :
1552 this->matches[i].num_components;
1553
1554 /* The last slot for this variable, inclusive. */
1555 unsigned slot_end = *location + num_components - 1;
1556
1557 /* FIXME: We could be smarter in the below code and loop back over
1558 * trying to fill any locations that we skipped because we couldn't pack
1559 * the varying around an explicit location. For now just let the user
1560 * hit the linking error if we run out of room and suggest they use
1561 * explicit locations.
1562 */
1563 while (slot_end < MAX_VARYING * 4u) {
1564 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1565 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1566
1567 assert(slots > 0);
1568 if (reserved_slots & slot_mask) {
1569 *location = ALIGN(*location + 1, 4);
1570 slot_end = *location + num_components - 1;
1571 continue;
1572 }
1573
1574 break;
1575 }
1576
1577 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1578 linker_error(prog, "insufficient contiguous locations available for "
1579 "%s it is possible an array or struct could not be "
1580 "packed between varyings with explicit locations. Try "
1581 "using an explicit location for arrays and structs.",
1582 var->name);
1583 }
1584
1585 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1586 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1587 components[j] = 4;
1588 components[slot_end / 4u] = (slot_end & 3) + 1;
1589 }
1590
1591 this->matches[i].generic_location = *location;
1592
1593 *location = slot_end + 1;
1594 }
1595
1596 return (generic_location + 3) / 4;
1597 }
1598
1599
1600 /**
1601 * Update the producer and consumer shaders to reflect the locations
1602 * assignments that were made by varying_matches::assign_locations().
1603 */
1604 void
1605 varying_matches::store_locations() const
1606 {
1607 /* Check if the location needs to be packed with lower_packed_varyings() or if
1608 * we can just use ARB_enhanced_layouts packing.
1609 */
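/* Added annotation, illustrative example: a float placed at component 2 of a
 * slot can be expressed directly as layout(location = N, component = 2) via
 * ARB_enhanced_layouts, but an array, matrix, struct or double-precision
 * varying, or a vector that straddles a slot boundary, must instead go
 * through lower_packed_varyings(), so its slots are flagged in pack_loc
 * below.
 */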
1610 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1611 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1612
1613 for (unsigned i = 0; i < this->num_matches; i++) {
1614 ir_variable *producer_var = this->matches[i].producer_var;
1615 ir_variable *consumer_var = this->matches[i].consumer_var;
1616 unsigned generic_location = this->matches[i].generic_location;
1617 unsigned slot = generic_location / 4;
1618 unsigned offset = generic_location % 4;
1619
1620 if (producer_var) {
1621 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1622 producer_var->data.location_frac = offset;
1623 }
1624
1625 if (consumer_var) {
1626 assert(consumer_var->data.location == -1);
1627 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1628 consumer_var->data.location_frac = offset;
1629 }
1630
1631 /* Find locations suitable for native packing via
1632 * ARB_enhanced_layouts.
1633 */
1634 if (producer_var && consumer_var) {
1635 if (enhanced_layouts_enabled) {
1636 const glsl_type *type =
1637 get_varying_type(producer_var, producer_stage);
1638 if (type->is_array() || type->is_matrix() || type->is_record() ||
1639 type->is_double()) {
1640 unsigned comp_slots = type->component_slots() + offset;
1641 unsigned slots = comp_slots / 4;
1642 if (comp_slots % 4)
1643 slots += 1;
1644
1645 for (unsigned j = 0; j < slots; j++) {
1646 pack_loc[slot + j] = true;
1647 }
1648 } else if (offset + type->vector_elements > 4) {
1649 pack_loc[slot] = true;
1650 pack_loc[slot + 1] = true;
1651 } else {
1652 loc_type[slot][offset] = type;
1653 }
1654 }
1655 }
1656 }
1657
1658 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1659 * suitable.
1660 */
1661 if (enhanced_layouts_enabled) {
1662 for (unsigned i = 0; i < this->num_matches; i++) {
1663 ir_variable *producer_var = this->matches[i].producer_var;
1664 ir_variable *consumer_var = this->matches[i].consumer_var;
1665 unsigned generic_location = this->matches[i].generic_location;
1666 unsigned slot = generic_location / 4;
1667
1668 if (pack_loc[slot] || !producer_var || !consumer_var)
1669 continue;
1670
1671 const glsl_type *type =
1672 get_varying_type(producer_var, producer_stage);
1673 bool type_match = true;
1674 for (unsigned j = 0; j < 4; j++) {
1675 if (loc_type[slot][j]) {
1676 if (type->base_type != loc_type[slot][j]->base_type)
1677 type_match = false;
1678 }
1679 }
1680
1681 if (type_match) {
1682 producer_var->data.explicit_location = 1;
1683 consumer_var->data.explicit_location = 1;
1684 producer_var->data.explicit_component = 1;
1685 consumer_var->data.explicit_component = 1;
1686 }
1687 }
1688 }
1689 }
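/* A sketch of the pack_loc / loc_type bookkeeping in store_locations() above,
 * for illustration only (the example types and locations are assumptions),
 * assuming enhanced_layouts_enabled:
 *
 *  - A matched mat2 at generic_location 8 (slot 2, offset 0) has
 *    component_slots() == 4, so pack_loc[2] is set and that slot falls back
 *    to lower_packed_varyings().
 *
 *  - A matched vec2 at generic_location 12 (slot 3, offset 0) and a matched
 *    float at generic_location 14 (slot 3, offset 2) record their types in
 *    loc_type[3][0] and loc_type[3][2].  Since both share a base type, the
 *    second pass sets explicit_location/explicit_component on the producer
 *    and consumer variables so the ARB_enhanced_layouts path can pack them
 *    without lowering.
 */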
1690
1691
1692 /**
1693 * Compute the "packing class" of the given varying. This is an unsigned
1694 * integer with the property that two variables in the same packing class can
1695  * be safely packed into the same vec4.
1696 */
1697 unsigned
1698 varying_matches::compute_packing_class(const ir_variable *var)
1699 {
1700 /* Without help from the back-end, there is no way to pack together
1701 * variables with different interpolation types, because
1702 * lower_packed_varyings must choose exactly one interpolation type for
1703 * each packed varying it creates.
1704 *
1705 * However, we can safely pack together floats, ints, and uints, because:
1706 *
1707 * - varyings of base type "int" and "uint" must use the "flat"
1708 * interpolation type, which can only occur in GLSL 1.30 and above.
1709 *
1710 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1711 * can store flat floats as ints without losing any information (using
1712 * the ir_unop_bitcast_* opcodes).
1713 *
1714 * Therefore, the packing class depends only on the interpolation type.
1715 */
1716 unsigned packing_class = var->data.centroid | (var->data.sample << 1) |
1717 (var->data.patch << 2) |
1718 (var->data.must_be_shader_input << 3);
1719 packing_class *= 8;
1720 packing_class += var->is_interpolation_flat()
1721 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
1722 return packing_class;
1723 }
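/* For illustration (a sketch, not normative): the packing class computed above
 * keeps the interpolation mode in the low three bits and the
 * centroid/sample/patch/must_be_shader_input flags in the bits above them.
 * For example, a plain "flat out int" (no centroid/sample/patch) yields
 * 0 * 8 + INTERP_MODE_FLAT, while a "centroid out vec2" yields
 * 1 * 8 + its interpolation mode.  The two values differ, so the linker will
 * never try to pack those two varyings into the same vec4.
 */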
1724
1725
1726 /**
1727 * Compute the "packing order" of the given varying. This is a sort key we
1728 * use to determine when to attempt to pack the given varying relative to
1729 * other varyings in the same packing class.
1730 */
1731 varying_matches::packing_order_enum
1732 varying_matches::compute_packing_order(const ir_variable *var)
1733 {
1734 const glsl_type *element_type = var->type;
1735
1736 while (element_type->is_array()) {
1737 element_type = element_type->fields.array;
1738 }
1739
1740 switch (element_type->component_slots() % 4) {
1741 case 1: return PACKING_ORDER_SCALAR;
1742 case 2: return PACKING_ORDER_VEC2;
1743 case 3: return PACKING_ORDER_VEC3;
1744 case 0: return PACKING_ORDER_VEC4;
1745 default:
1746       assert(!"Unexpected value of component_slots() % 4");
1747 return PACKING_ORDER_VEC4;
1748 }
1749 }
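/* A few worked examples of compute_packing_order(), for illustration only; it
 * looks at the innermost element type:
 *
 *    vec3 v;        component_slots() == 3       ->  PACKING_ORDER_VEC3
 *    mat2 m;        component_slots() == 4       ->  PACKING_ORDER_VEC4
 *    float f[5];    element float, 1 component   ->  PACKING_ORDER_SCALAR
 *    mat3 m3;       9 % 4 == 1                   ->  PACKING_ORDER_SCALAR
 *
 * Sorting by this key groups varyings whose sizes leave similar amounts of
 * free space in a vec4, which helps the greedy packing in assign_locations().
 */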
1750
1751
1752 /**
1753 * Comparison function passed to qsort() to sort varyings by packing_class and
1754 * then by packing_order.
1755 */
1756 int
1757 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
1758 {
1759 const match *x = (const match *) x_generic;
1760 const match *y = (const match *) y_generic;
1761
1762 if (x->packing_class != y->packing_class)
1763 return x->packing_class - y->packing_class;
1764 return x->packing_order - y->packing_order;
1765 }
1766
1767
1768 /**
1769 * Comparison function passed to qsort() to sort varyings used only by
1770 * transform feedback when packing of other varyings is disabled.
1771 */
1772 int
1773 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
1774 {
1775 const match *x = (const match *) x_generic;
1776
1777 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
1778 return match_comparator(x_generic, y_generic);
1779
1780 /* FIXME: When the comparator returns 0 it means the elements being
1781 * compared are equivalent. However the qsort documentation says:
1782 *
1783 * "The order of equivalent elements is undefined."
1784 *
1785     * In practice the sort ends up reversing the order of the varyings, which
1786     * means locations are also assigned in this reversed order, and that
1787     * happens to be what we want. The same thing happens in
1788     * varying_matches::match_comparator().
1789 */
1790 return 0;
1791 }
1792
1793
1794 /**
1795 * Is the given variable a varying variable to be counted against the
1796 * limit in ctx->Const.MaxVarying?
1797 * This includes variables such as texcoords, colors and generic
1798 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
1799 */
1800 static bool
1801 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
1802 {
1803 /* Only fragment shaders will take a varying variable as an input */
1804 if (stage == MESA_SHADER_FRAGMENT &&
1805 var->data.mode == ir_var_shader_in) {
1806 switch (var->data.location) {
1807 case VARYING_SLOT_POS:
1808 case VARYING_SLOT_FACE:
1809 case VARYING_SLOT_PNTC:
1810 return false;
1811 default:
1812 return true;
1813 }
1814 }
1815 return false;
1816 }
1817
1818
1819 /**
1820 * Visitor class that generates tfeedback_candidate structs describing all
1821 * possible targets of transform feedback.
1822 *
1823 * tfeedback_candidate structs are stored in the hash table
1824 * tfeedback_candidates, which is passed to the constructor. This hash table
1825 * maps varying names to instances of the tfeedback_candidate struct.
1826 */
1827 class tfeedback_candidate_generator : public program_resource_visitor
1828 {
1829 public:
1830 tfeedback_candidate_generator(void *mem_ctx,
1831 hash_table *tfeedback_candidates)
1832 : mem_ctx(mem_ctx),
1833 tfeedback_candidates(tfeedback_candidates),
1834 toplevel_var(NULL),
1835 varying_floats(0)
1836 {
1837 }
1838
1839 void process(ir_variable *var)
1840 {
1841 /* All named varying interface blocks should be flattened by now */
1842 assert(!var->is_interface_instance());
1843
1844 this->toplevel_var = var;
1845 this->varying_floats = 0;
1846 program_resource_visitor::process(var, false);
1847 }
1848
1849 private:
1850 virtual void visit_field(const glsl_type *type, const char *name,
1851 bool /* row_major */,
1852 const glsl_type * /* record_type */,
1853 const enum glsl_interface_packing,
1854 bool /* last_field */)
1855 {
1856 assert(!type->without_array()->is_record());
1857 assert(!type->without_array()->is_interface());
1858
1859 tfeedback_candidate *candidate
1860 = rzalloc(this->mem_ctx, tfeedback_candidate);
1861 candidate->toplevel_var = this->toplevel_var;
1862 candidate->type = type;
1863 candidate->offset = this->varying_floats;
1864 _mesa_hash_table_insert(this->tfeedback_candidates,
1865 ralloc_strdup(this->mem_ctx, name),
1866 candidate);
1867 this->varying_floats += type->component_slots();
1868 }
1869
1870 /**
1871 * Memory context used to allocate hash table keys and values.
1872 */
1873 void * const mem_ctx;
1874
1875 /**
1876 * Hash table in which tfeedback_candidate objects should be stored.
1877 */
1878 hash_table * const tfeedback_candidates;
1879
1880 /**
1881 * Pointer to the toplevel variable that is being traversed.
1882 */
1883 ir_variable *toplevel_var;
1884
1885 /**
1886 * Total number of varying floats that have been visited so far. This is
1887 * used to determine the offset to each varying within the toplevel
1888 * variable.
1889 */
1890 unsigned varying_floats;
1891 };
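/* For illustration (a sketch, the exact key names are an assumption): given a
 * producer output such as
 *
 *    struct S { vec2 a; float b; };
 *    out S s;
 *
 * tfeedback_candidate_generator adds roughly two entries to the hash table,
 * "s.a" with offset 0 and "s.b" with offset 2 (offsets are counted in
 * floats), both pointing back at the toplevel variable s.  These keys are
 * what tfeedback_decl::find_candidate() later matches against the names
 * passed to glTransformFeedbackVaryings().
 */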
1892
1893
1894 namespace linker {
1895
1896 void
1897 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
1898 hash_table *consumer_inputs,
1899 hash_table *consumer_interface_inputs,
1900 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1901 {
1902 memset(consumer_inputs_with_locations,
1903 0,
1904 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
1905
1906 foreach_in_list(ir_instruction, node, ir) {
1907 ir_variable *const input_var = node->as_variable();
1908
1909 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
1910 /* All interface blocks should have been lowered by this point */
1911 assert(!input_var->type->is_interface());
1912
1913 if (input_var->data.explicit_location) {
1914 /* assign_varying_locations only cares about finding the
1915 * ir_variable at the start of a contiguous location block.
1916 *
1917 * - For !producer, consumer_inputs_with_locations isn't used.
1918 *
1919 * - For !consumer, consumer_inputs_with_locations is empty.
1920 *
1921 * For consumer && producer, if you were trying to set some
1922 * ir_variable to the middle of a location block on the other side
1923 * of producer/consumer, cross_validate_outputs_to_inputs() should
1924 * be link-erroring due to either type mismatch or location
1925 * overlaps. If the variables do match up, then they've got a
1926 * matching data.location and you only looked at
1927 * consumer_inputs_with_locations[var->data.location], not any
1928 * following entries for the array/structure.
1929 */
1930 consumer_inputs_with_locations[input_var->data.location] =
1931 input_var;
1932 } else if (input_var->get_interface_type() != NULL) {
1933 char *const iface_field_name =
1934 ralloc_asprintf(mem_ctx, "%s.%s",
1935 input_var->get_interface_type()->without_array()->name,
1936 input_var->name);
1937 _mesa_hash_table_insert(consumer_interface_inputs,
1938 iface_field_name, input_var);
1939 } else {
1940 _mesa_hash_table_insert(consumer_inputs,
1941 ralloc_strdup(mem_ctx, input_var->name),
1942 input_var);
1943 }
1944 }
1945 }
1946 }
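/* For illustration (a sketch, the example declarations are assumptions):
 * after populate_consumer_input_sets() each consumer input is reachable
 * through exactly one of the three lookup structures:
 *
 *    layout(location = 1) in vec4 a;  -> consumer_inputs_with_locations[a->data.location]
 *    in Block { vec4 v; };            -> consumer_interface_inputs["Block.v"]
 *    in vec4 color;                   -> consumer_inputs["color"]
 *
 * get_matching_input() below performs the mirror-image lookup for each
 * producer output.
 */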
1947
1948 /**
1949 * Find a variable from the consumer that "matches" the specified variable
1950 *
1951 * This function only finds inputs with names that match. There is no
1952 * validation (here) that the types, etc. are compatible.
1953 */
1954 ir_variable *
1955 get_matching_input(void *mem_ctx,
1956 const ir_variable *output_var,
1957 hash_table *consumer_inputs,
1958 hash_table *consumer_interface_inputs,
1959 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1960 {
1961 ir_variable *input_var;
1962
1963 if (output_var->data.explicit_location) {
1964 input_var = consumer_inputs_with_locations[output_var->data.location];
1965 } else if (output_var->get_interface_type() != NULL) {
1966 char *const iface_field_name =
1967 ralloc_asprintf(mem_ctx, "%s.%s",
1968 output_var->get_interface_type()->without_array()->name,
1969 output_var->name);
1970 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
1971 input_var = entry ? (ir_variable *) entry->data : NULL;
1972 } else {
1973 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
1974 input_var = entry ? (ir_variable *) entry->data : NULL;
1975 }
1976
1977 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
1978 ? NULL : input_var;
1979 }
1980
1981 }
1982
1983 static int
1984 io_variable_cmp(const void *_a, const void *_b)
1985 {
1986 const ir_variable *const a = *(const ir_variable **) _a;
1987 const ir_variable *const b = *(const ir_variable **) _b;
1988
1989 if (a->data.explicit_location && b->data.explicit_location)
1990 return b->data.location - a->data.location;
1991
1992 if (a->data.explicit_location && !b->data.explicit_location)
1993 return 1;
1994
1995 if (!a->data.explicit_location && b->data.explicit_location)
1996 return -1;
1997
1998 return -strcmp(a->name, b->name);
1999 }
2000
2001 /**
2002 * Sort the shader IO variables into canonical order
2003 */
2004 static void
2005 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2006 {
2007 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2008 unsigned num_variables = 0;
2009
2010 foreach_in_list(ir_instruction, node, ir) {
2011 ir_variable *const var = node->as_variable();
2012
2013 if (var == NULL || var->data.mode != io_mode)
2014 continue;
2015
2016       /* If we have already encountered more I/O variables than could
2017 * successfully link, bail.
2018 */
2019 if (num_variables == ARRAY_SIZE(var_table))
2020 return;
2021
2022 var_table[num_variables++] = var;
2023 }
2024
2025 if (num_variables == 0)
2026 return;
2027
2028 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2029 * we're going to push the variables on to the IR list as a stack, so we
2030 * want the last variable (in canonical order) to be first in the list.
2031 */
2032 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2033
2034    /* Remove the variable from its current location in the IR, and put it at
2035 * the front.
2036 */
2037 for (unsigned i = 0; i < num_variables; i++) {
2038 var_table[i]->remove();
2039 ir->push_head(var_table[i]);
2040 }
2041 }
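/* A worked example of the canonical order, for illustration only (the
 * declarations are assumptions): given shader outputs
 *
 *    out vec4 d;
 *    out vec4 c;
 *    layout(location = 2) out vec4 a;
 *    layout(location = 1) out vec4 b;
 *
 * io_variable_cmp() sorts them as d, c, a, b; pushing each onto the head of
 * the IR list then yields b, a, c, d: explicit locations first in ascending
 * order, followed by the rest in name order, regardless of the declaration
 * order in the source.
 */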
2042
2043 /**
2044 * Generate a bitfield map of the explicit locations for shader varyings.
2045 *
2046  * Note: For tessellation shaders we are sitting right on the limits of the
2047  * 64-bit map. Per-vertex and per-patch varyings have separate location
2048  * domains, each with a max of MAX_VARYING.
2049 */
2050 static uint64_t
2051 reserved_varying_slot(struct gl_linked_shader *stage,
2052 ir_variable_mode io_mode)
2053 {
2054 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2055 /* Avoid an overflow of the returned value */
2056 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2057
2058 uint64_t slots = 0;
2059 int var_slot;
2060
2061 if (!stage)
2062 return slots;
2063
2064 foreach_in_list(ir_instruction, node, stage->ir) {
2065 ir_variable *const var = node->as_variable();
2066
2067 if (var == NULL || var->data.mode != io_mode ||
2068 !var->data.explicit_location ||
2069 var->data.location < VARYING_SLOT_VAR0)
2070 continue;
2071
2072 var_slot = var->data.location - VARYING_SLOT_VAR0;
2073
2074 unsigned num_elements = get_varying_type(var, stage->Stage)
2075 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2076 stage->Stage == MESA_SHADER_VERTEX);
2077 for (unsigned i = 0; i < num_elements; i++) {
2078 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2079 slots |= UINT64_C(1) << var_slot;
2080 var_slot += 1;
2081 }
2082 }
2083
2084 return slots;
2085 }
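/* For illustration (a sketch, the declaration is an assumption): a producer
 * with
 *
 *    layout(location = 3) out vec4 foo[2];
 *
 * has data.location == VARYING_SLOT_VAR0 + 3, so var_slot starts at 3 and
 * count_attribute_slots() returns 2, giving a mask of
 * (1 << 3) | (1 << 4) == 0x18.  assign_locations() then skips those two slots
 * when placing varyings without explicit locations.
 */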
2086
2087
2088 /**
2089 * Assign locations for all variables that are produced in one pipeline stage
2090 * (the "producer") and consumed in the next stage (the "consumer").
2091 *
2092 * Variables produced by the producer may also be consumed by transform
2093 * feedback.
2094 *
2095 * \param num_tfeedback_decls is the number of declarations indicating
2096 * variables that may be consumed by transform feedback.
2097 *
2098 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2099 * representing the result of parsing the strings passed to
2100 * glTransformFeedbackVaryings(). assign_location() will be called for
2101 * each of these objects that matches one of the outputs of the
2102 * producer.
2103 *
2104 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2105 * be NULL. In this case, varying locations are assigned solely based on the
2106 * requirements of transform feedback.
2107 */
2108 static bool
2109 assign_varying_locations(struct gl_context *ctx,
2110 void *mem_ctx,
2111 struct gl_shader_program *prog,
2112 gl_linked_shader *producer,
2113 gl_linked_shader *consumer,
2114 unsigned num_tfeedback_decls,
2115 tfeedback_decl *tfeedback_decls,
2116 const uint64_t reserved_slots)
2117 {
2118 /* Tessellation shaders treat inputs and outputs as shared memory and can
2119 * access inputs and outputs of other invocations.
2120 * Therefore, they can't be lowered to temps easily (and definitely not
2121 * efficiently).
2122 */
2123 bool unpackable_tess =
2124 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2125 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2126 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2127
2128 /* Transform feedback code assumes varying arrays are packed, so if the
2129 * driver has disabled varying packing, make sure to at least enable
2130 * packing required by transform feedback.
2131 */
2132 bool xfb_enabled =
2133 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2134
2135 /* Disable packing on outward facing interfaces for SSO because in ES we
2136 * need to retain the unpacked varying information for draw time
2137 * validation.
2138 *
2139 * Packing is still enabled on individual arrays, structs, and matrices as
2140 * these are required by the transform feedback code and it is still safe
2141     * to do so. We also enable packing when a varying is only used for
2142     * transform feedback and the program is not an SSO.
2143 */
2144 bool disable_varying_packing =
2145 ctx->Const.DisableVaryingPacking || unpackable_tess;
2146 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2147 disable_varying_packing = true;
2148
2149 varying_matches matches(disable_varying_packing, xfb_enabled,
2150 ctx->Extensions.ARB_enhanced_layouts,
2151 producer ? producer->Stage : MESA_SHADER_NONE,
2152 consumer ? consumer->Stage : MESA_SHADER_NONE);
2153 hash_table *tfeedback_candidates =
2154 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2155 _mesa_key_string_equal);
2156 hash_table *consumer_inputs =
2157 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2158 _mesa_key_string_equal);
2159 hash_table *consumer_interface_inputs =
2160 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2161 _mesa_key_string_equal);
2162 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2163 NULL,
2164 };
2165
2166 unsigned consumer_vertices = 0;
2167 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2168 consumer_vertices = prog->Geom.VerticesIn;
2169
2170 /* Operate in a total of four passes.
2171 *
2172 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2173 * that inputs / outputs of separable shaders will be assigned
2174 * predictable locations regardless of the order in which declarations
2175 * appeared in the shader source.
2176 *
2177 * 2. Assign locations for any matching inputs and outputs.
2178 *
2179 * 3. Mark output variables in the producer that do not have locations as
2180 * not being outputs. This lets the optimizer eliminate them.
2181 *
2182 * 4. Mark input variables in the consumer that do not have locations as
2183 * not being inputs. This lets the optimizer eliminate them.
2184 */
2185 if (consumer)
2186 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2187
2188 if (producer)
2189 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2190
2191 if (consumer)
2192 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2193 consumer_inputs,
2194 consumer_interface_inputs,
2195 consumer_inputs_with_locations);
2196
2197 if (producer) {
2198 foreach_in_list(ir_instruction, node, producer->ir) {
2199 ir_variable *const output_var = node->as_variable();
2200
2201 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2202 continue;
2203
2204 /* Only geometry shaders can use non-zero streams */
2205 assert(output_var->data.stream == 0 ||
2206 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2207 producer->Stage == MESA_SHADER_GEOMETRY));
2208
2209 if (num_tfeedback_decls > 0) {
2210 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2211 g.process(output_var);
2212 }
2213
2214 ir_variable *const input_var =
2215 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2216 consumer_interface_inputs,
2217 consumer_inputs_with_locations);
2218
2219 /* If a matching input variable was found, add this output (and the
2220 * input) to the set. If this is a separable program and there is no
2221 * consumer stage, add the output.
2222 *
2223 * Always add TCS outputs. They are shared by all invocations
2224 * within a patch and can be used as shared memory.
2225 */
2226 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2227 producer->Stage == MESA_SHADER_TESS_CTRL) {
2228 matches.record(output_var, input_var);
2229 }
2230
2231 /* Only stream 0 outputs can be consumed in the next stage */
2232 if (input_var && output_var->data.stream != 0) {
2233 linker_error(prog, "output %s is assigned to stream=%d but "
2234 "is linked to an input, which requires stream=0",
2235 output_var->name, output_var->data.stream);
2236 return false;
2237 }
2238 }
2239 } else {
2240 /* If there's no producer stage, then this must be a separable program.
2241 * For example, we may have a program that has just a fragment shader.
2242 * Later this program will be used with some arbitrary vertex (or
2243 * geometry) shader program. This means that locations must be assigned
2244 * for all the inputs.
2245 */
2246 foreach_in_list(ir_instruction, node, consumer->ir) {
2247 ir_variable *const input_var = node->as_variable();
2248
2249 if (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2250 continue;
2251
2252 matches.record(NULL, input_var);
2253 }
2254 }
2255
2256 _mesa_hash_table_destroy(consumer_inputs, NULL);
2257 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2258
2259 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2260 if (!tfeedback_decls[i].is_varying())
2261 continue;
2262
2263 const tfeedback_candidate *matched_candidate
2264 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2265
2266 if (matched_candidate == NULL) {
2267 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2268 return false;
2269 }
2270
2271 /* Mark xfb varyings as always active */
2272 matched_candidate->toplevel_var->data.always_active_io = 1;
2273
2274 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2275 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2276 matches.record(matched_candidate->toplevel_var, NULL);
2277 }
2278 }
2279
2280 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2281 const unsigned slots_used = matches.assign_locations(
2282 prog, components, reserved_slots);
2283 matches.store_locations();
2284
2285 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2286 if (!tfeedback_decls[i].is_varying())
2287 continue;
2288
2289 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2290 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2291 return false;
2292 }
2293 }
2294 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2295
2296 if (consumer && producer) {
2297 foreach_in_list(ir_instruction, node, consumer->ir) {
2298 ir_variable *const var = node->as_variable();
2299
2300 if (var && var->data.mode == ir_var_shader_in &&
2301 var->data.is_unmatched_generic_inout) {
2302 if (!prog->IsES && prog->data->Version <= 120) {
2303 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2304 *
2305 * Only those varying variables used (i.e. read) in
2306 * the fragment shader executable must be written to
2307 * by the vertex shader executable; declaring
2308 * superfluous varying variables in a vertex shader is
2309 * permissible.
2310 *
2311 * We interpret this text as meaning that the VS must
2312 * write the variable for the FS to read it. See
2313 * "glsl1-varying read but not written" in piglit.
2314 */
2315 linker_error(prog, "%s shader varying %s not written "
2316                             "by %s shader.\n",
2317 _mesa_shader_stage_to_string(consumer->Stage),
2318 var->name,
2319 _mesa_shader_stage_to_string(producer->Stage));
2320 } else {
2321 linker_warning(prog, "%s shader varying %s not written "
2322                               "by %s shader.\n",
2323 _mesa_shader_stage_to_string(consumer->Stage),
2324 var->name,
2325 _mesa_shader_stage_to_string(producer->Stage));
2326 }
2327 }
2328 }
2329
2330       /* Now that validation is done it's safe to remove unused varyings. As
2331        * we have both a producer and a consumer it's safe to remove unused
2332        * varyings even if the program is an SSO, because the stages are being
2333        * linked together, i.e. we have a multi-stage SSO.
2334 */
2335 remove_unused_shader_inputs_and_outputs(false, producer,
2336 ir_var_shader_out);
2337 remove_unused_shader_inputs_and_outputs(false, consumer,
2338 ir_var_shader_in);
2339 }
2340
2341 if (producer) {
2342 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2343 0, producer, disable_varying_packing,
2344 xfb_enabled);
2345 }
2346
2347 if (consumer) {
2348 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2349 consumer_vertices, consumer,
2350 disable_varying_packing, xfb_enabled);
2351 }
2352
2353 return true;
2354 }
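/* A sketch of the xfb-only path in assign_varying_locations() above, for
 * illustration only (the varying name is an assumption): if
 * glTransformFeedbackVaryings() captures "foo", which the producer writes but
 * no consumer input reads, the matched candidate's toplevel variable gets
 * always_active_io set (so later dead-IO elimination leaves it alone), is
 * flagged is_xfb_only, and is recorded with a NULL consumer.
 * assign_locations() still gives it a generic location, and when packing is
 * otherwise disabled, consecutive xfb-only varyings skip the forced
 * re-alignment to a slot boundary (the previous_var_xfb_only check near the
 * top of assign_locations()) so they can stay packed for the transform
 * feedback code.
 */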
2355
2356 static bool
2357 check_against_output_limit(struct gl_context *ctx,
2358 struct gl_shader_program *prog,
2359 gl_linked_shader *producer,
2360 unsigned num_explicit_locations)
2361 {
2362 unsigned output_vectors = num_explicit_locations;
2363
2364 foreach_in_list(ir_instruction, node, producer->ir) {
2365 ir_variable *const var = node->as_variable();
2366
2367 if (var && !var->data.explicit_location &&
2368 var->data.mode == ir_var_shader_out &&
2369 var_counts_against_varying_limit(producer->Stage, var)) {
2370 /* outputs for fragment shader can't be doubles */
2371 output_vectors += var->type->count_attribute_slots(false);
2372 }
2373 }
2374
2375 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2376 unsigned max_output_components =
2377 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2378
2379 const unsigned output_components = output_vectors * 4;
2380 if (output_components > max_output_components) {
2381 if (ctx->API == API_OPENGLES2 || prog->IsES)
2382 linker_error(prog, "%s shader uses too many output vectors "
2383 "(%u > %u)\n",
2384 _mesa_shader_stage_to_string(producer->Stage),
2385 output_vectors,
2386 max_output_components / 4);
2387 else
2388 linker_error(prog, "%s shader uses too many output components "
2389 "(%u > %u)\n",
2390 _mesa_shader_stage_to_string(producer->Stage),
2391 output_components,
2392 max_output_components);
2393
2394 return false;
2395 }
2396
2397 return true;
2398 }
2399
2400 static bool
2401 check_against_input_limit(struct gl_context *ctx,
2402 struct gl_shader_program *prog,
2403 gl_linked_shader *consumer,
2404 unsigned num_explicit_locations)
2405 {
2406 unsigned input_vectors = num_explicit_locations;
2407
2408 foreach_in_list(ir_instruction, node, consumer->ir) {
2409 ir_variable *const var = node->as_variable();
2410
2411 if (var && !var->data.explicit_location &&
2412 var->data.mode == ir_var_shader_in &&
2413 var_counts_against_varying_limit(consumer->Stage, var)) {
2414          /* these aren't vertex shader inputs, so don't use vertex-input slot counting */
2415 input_vectors += var->type->count_attribute_slots(false);
2416 }
2417 }
2418
2419 assert(consumer->Stage != MESA_SHADER_VERTEX);
2420 unsigned max_input_components =
2421 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2422
2423 const unsigned input_components = input_vectors * 4;
2424 if (input_components > max_input_components) {
2425 if (ctx->API == API_OPENGLES2 || prog->IsES)
2426 linker_error(prog, "%s shader uses too many input vectors "
2427 "(%u > %u)\n",
2428 _mesa_shader_stage_to_string(consumer->Stage),
2429 input_vectors,
2430 max_input_components / 4);
2431 else
2432 linker_error(prog, "%s shader uses too many input components "
2433 "(%u > %u)\n",
2434 _mesa_shader_stage_to_string(consumer->Stage),
2435 input_components,
2436 max_input_components);
2437
2438 return false;
2439 }
2440
2441 return true;
2442 }
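/* A worked example of the limit checks above, for illustration only (the
 * counts and limit are assumptions): a consumer with 18 generic vec4 inputs
 * and no explicit locations has input_vectors == 18 and
 * input_components == 72.  With a MaxInputComponents of 64 the link fails; on
 * ES the message reports vectors (18 > 16), on desktop GL it reports
 * components (72 > 64).
 */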
2443
2444 bool
2445 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2446 struct gl_context *ctx, void *mem_ctx)
2447 {
2448 bool has_xfb_qualifiers = false;
2449 unsigned num_tfeedback_decls = 0;
2450 char **varying_names = NULL;
2451 tfeedback_decl *tfeedback_decls = NULL;
2452
2453 /* From the ARB_enhanced_layouts spec:
2454 *
2455 * "If the shader used to record output variables for transform feedback
2456 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2457 * qualifiers, the values specified by TransformFeedbackVaryings are
2458 * ignored, and the set of variables captured for transform feedback is
2459 * instead derived from the specified layout qualifiers."
2460 */
2461 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2462 /* Find last stage before fragment shader */
2463 if (prog->_LinkedShaders[i]) {
2464 has_xfb_qualifiers =
2465 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2466 prog, &num_tfeedback_decls,
2467 &varying_names);
2468 break;
2469 }
2470 }
2471
2472 if (!has_xfb_qualifiers) {
2473 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2474 varying_names = prog->TransformFeedback.VaryingNames;
2475 }
2476
2477 if (num_tfeedback_decls != 0) {
2478 /* From GL_EXT_transform_feedback:
2479 * A program will fail to link if:
2480 *
2481 * * the <count> specified by TransformFeedbackVaryingsEXT is
2482 * non-zero, but the program object has no vertex or geometry
2483 * shader;
2484 */
2485 if (first >= MESA_SHADER_FRAGMENT) {
2486 linker_error(prog, "Transform feedback varyings specified, but "
2487 "no vertex, tessellation, or geometry shader is "
2488 "present.\n");
2489 return false;
2490 }
2491
2492 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2493 num_tfeedback_decls);
2494 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2495 varying_names, tfeedback_decls))
2496 return false;
2497 }
2498
2499    /* If there is no fragment shader we still need to set up transform
2500     * feedback.
2501     *
2502     * For SSO we also need to assign output locations. We assign them here
2503     * because we need to do it for both single-stage and multi-stage programs.
2504     */
2505 if (last < MESA_SHADER_FRAGMENT &&
2506 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2507 const uint64_t reserved_out_slots =
2508 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2509 if (!assign_varying_locations(ctx, mem_ctx, prog,
2510 prog->_LinkedShaders[last], NULL,
2511 num_tfeedback_decls, tfeedback_decls,
2512 reserved_out_slots))
2513 return false;
2514 }
2515
2516 if (last <= MESA_SHADER_FRAGMENT) {
2517 /* Remove unused varyings from the first/last stage unless SSO */
2518 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2519 prog->_LinkedShaders[first],
2520 ir_var_shader_in);
2521 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2522 prog->_LinkedShaders[last],
2523 ir_var_shader_out);
2524
2525 /* If the program is made up of only a single stage */
2526 if (first == last) {
2527 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2528
2529 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2530 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2531 tfeedback_decls);
2532
2533 if (prog->SeparateShader) {
2534 const uint64_t reserved_slots =
2535 reserved_varying_slot(sh, ir_var_shader_in);
2536
2537 /* Assign input locations for SSO, output locations are already
2538 * assigned.
2539 */
2540 if (!assign_varying_locations(ctx, mem_ctx, prog,
2541 NULL /* producer */,
2542 sh /* consumer */,
2543 0 /* num_tfeedback_decls */,
2544 NULL /* tfeedback_decls */,
2545 reserved_slots))
2546 return false;
2547 }
2548 } else {
2549 /* Linking the stages in the opposite order (from fragment to vertex)
2550 * ensures that inter-shader outputs written to in an earlier stage
2551 * are eliminated if they are (transitively) not used in a later
2552 * stage.
2553 */
2554 int next = last;
2555 for (int i = next - 1; i >= 0; i--) {
2556 if (prog->_LinkedShaders[i] == NULL && i != 0)
2557 continue;
2558
2559 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2560 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2561
2562 const uint64_t reserved_out_slots =
2563 reserved_varying_slot(sh_i, ir_var_shader_out);
2564 const uint64_t reserved_in_slots =
2565 reserved_varying_slot(sh_next, ir_var_shader_in);
2566
2567 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2568 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2569 tfeedback_decls);
2570
2571 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2572 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2573 tfeedback_decls,
2574 reserved_out_slots | reserved_in_slots))
2575 return false;
2576
2577 /* This must be done after all dead varyings are eliminated. */
2578 if (sh_i != NULL) {
2579 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2580 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2581 return false;
2582 }
2583 }
2584
2585 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2586 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2587 return false;
2588
2589 next = i;
2590 }
2591 }
2592 }
2593
2594 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2595 has_xfb_qualifiers))
2596 return false;
2597
2598 return true;
2599 }