glsl: mark xfb inputs as always_active_io
[mesa.git] src/compiler/glsl/link_varyings.cpp
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 /**
25 * \file link_varyings.cpp
26 *
27 * Linker functions related specifically to linking varyings between shader
28 * stages.
29 */
30
31
32 #include "main/mtypes.h"
33 #include "glsl_symbol_table.h"
34 #include "glsl_parser_extras.h"
35 #include "ir_optimization.h"
36 #include "linker.h"
37 #include "link_varyings.h"
38 #include "main/macros.h"
39 #include "util/hash_table.h"
40 #include "program.h"
41
42
43 /**
44 * Get the varying type stripped of the outermost array if we're processing
45 * a stage whose varyings are arrays indexed by a vertex number (such as
46 * geometry shader inputs).
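 *
 * For illustration (assumed GLSL, not taken from this file): a geometry
 * shader input declared as `in vec4 gs_color[];` has an IR type of vec4[N],
 * N being the input primitive's vertex count; this helper returns the
 * per-vertex type vec4.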
47 */
48 static const glsl_type *
49 get_varying_type(const ir_variable *var, gl_shader_stage stage)
50 {
51 const glsl_type *type = var->type;
52
53 if (!var->data.patch &&
54 ((var->data.mode == ir_var_shader_out &&
55 stage == MESA_SHADER_TESS_CTRL) ||
56 (var->data.mode == ir_var_shader_in &&
57 (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
58 stage == MESA_SHADER_GEOMETRY)))) {
59 assert(type->is_array());
60 type = type->fields.array;
61 }
62
63 return type;
64 }
65
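/**
 * Recursively build the names used to describe the transform feedback
 * varyings implied by xfb_offset qualifiers.
 *
 * Illustrative example (assumed GLSL, not from this file): for
 * `struct S { float x; vec2 y; }; layout(xfb_offset = 0) out S s[2];`
 * the recursion emits the names "s[0].x", "s[0].y", "s[1].x" and "s[1].y".
 */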
66 static void
67 create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
68 size_t name_length, unsigned *count,
69 const char *ifc_member_name,
70 const glsl_type *ifc_member_t, char ***varying_names)
71 {
72 if (t->is_interface()) {
73 size_t new_length = name_length;
74
75 assert(ifc_member_name && ifc_member_t);
76 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
77
78 create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
79 NULL, NULL, varying_names);
80 } else if (t->is_record()) {
81 for (unsigned i = 0; i < t->length; i++) {
82 const char *field = t->fields.structure[i].name;
83 size_t new_length = name_length;
84
85 ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
86
87 create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
88 new_length, count, NULL, NULL,
89 varying_names);
90 }
91 } else if (t->without_array()->is_record() ||
92 t->without_array()->is_interface() ||
93 (t->is_array() && t->fields.array->is_array())) {
94 for (unsigned i = 0; i < t->length; i++) {
95 size_t new_length = name_length;
96
97 /* Append the subscript to the current variable name */
98 ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
99
100 create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
101 count, ifc_member_name, ifc_member_t,
102 varying_names);
103 }
104 } else {
105 (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
106 }
107 }
108
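/**
 * Check whether this stage uses any ARB_enhanced_layouts xfb_* qualifiers;
 * if it does, count the transform feedback declarations they imply and build
 * the corresponding varying name list with create_xfb_varying_names().
 */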
109 static bool
110 process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
111 struct gl_shader_program *prog,
112 unsigned *num_tfeedback_decls,
113 char ***varying_names)
114 {
115 bool has_xfb_qualifiers = false;
116
117 /* We still need to enable transform feedback mode even if xfb_stride is
118 * only applied to a global out. Also, we don't bother to propagate
119 * xfb_stride to interface block members, so this will catch that case too.
120 */
121 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
122 if (prog->TransformFeedback.BufferStride[j]) {
123 has_xfb_qualifiers = true;
124 break;
125 }
126 }
127
128 foreach_in_list(ir_instruction, node, sh->ir) {
129 ir_variable *var = node->as_variable();
130 if (!var || var->data.mode != ir_var_shader_out)
131 continue;
132
133 /* From the ARB_enhanced_layouts spec:
134 *
135 * "Any shader making any static use (after preprocessing) of any of
136 * these *xfb_* qualifiers will cause the shader to be in a
137 * transform feedback capturing mode and hence responsible for
138 * describing the transform feedback setup. This mode will capture
139 * any output selected by *xfb_offset*, directly or indirectly, to
140 * a transform feedback buffer."
141 */
142 if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
143 has_xfb_qualifiers = true;
144 }
145
146 if (var->data.explicit_xfb_offset) {
147 *num_tfeedback_decls += var->type->varying_count();
148 has_xfb_qualifiers = true;
149 }
150 }
151
152 if (*num_tfeedback_decls == 0)
153 return has_xfb_qualifiers;
154
155 unsigned i = 0;
156 *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
157 foreach_in_list(ir_instruction, node, sh->ir) {
158 ir_variable *var = node->as_variable();
159 if (!var || var->data.mode != ir_var_shader_out)
160 continue;
161
162 if (var->data.explicit_xfb_offset) {
163 char *name;
164 const glsl_type *type, *member_type;
165
166 if (var->data.from_named_ifc_block) {
167 type = var->get_interface_type();
168 /* Find the member type before it was altered by lowering */
169 member_type =
170 type->fields.structure[type->field_index(var->name)].type;
171 name = ralloc_strdup(NULL, type->without_array()->name);
172 } else {
173 type = var->type;
174 member_type = NULL;
175 name = ralloc_strdup(NULL, var->name);
176 }
177 create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
178 var->name, member_type, varying_names);
179 ralloc_free(name);
180 }
181 }
182
183 assert(i == *num_tfeedback_decls);
184 return has_xfb_qualifiers;
185 }
186
187 /**
188 * Validate the types and qualifiers of an output from one stage against the
189 * matching input to another stage.
190 */
191 static void
192 cross_validate_types_and_qualifiers(struct gl_shader_program *prog,
193 const ir_variable *input,
194 const ir_variable *output,
195 gl_shader_stage consumer_stage,
196 gl_shader_stage producer_stage)
197 {
198 /* Check that the types match between stages.
199 */
200 const glsl_type *type_to_match = input->type;
201
202 /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
203 const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
204 consumer_stage != MESA_SHADER_FRAGMENT) ||
205 consumer_stage == MESA_SHADER_GEOMETRY;
206 if (extra_array_level) {
207 assert(type_to_match->is_array());
208 type_to_match = type_to_match->fields.array;
209 }
210
211 if (type_to_match != output->type) {
212 /* There is a bit of a special case for gl_TexCoord. This
213 * built-in is unsized by default. Applications that variably
214 * access it must redeclare it with a size. There is some
215 * language in the GLSL spec that implies the fragment shader
216 * and vertex shader do not have to agree on this size. Other
217 * drivers behave this way, and one or two applications seem to
218 * rely on it.
219 *
220 * Neither declaration needs to be modified here because the array
221 * sizes are fixed later when update_array_sizes is called.
222 *
223 * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
224 *
225 * "Unlike user-defined varying variables, the built-in
226 * varying variables don't have a strict one-to-one
227 * correspondence between the vertex language and the
228 * fragment language."
229 */
230 if (!output->type->is_array() || !is_gl_identifier(output->name)) {
231 linker_error(prog,
232 "%s shader output `%s' declared as type `%s', "
233 "but %s shader input declared as type `%s'\n",
234 _mesa_shader_stage_to_string(producer_stage),
235 output->name,
236 output->type->name,
237 _mesa_shader_stage_to_string(consumer_stage),
238 input->type->name);
239 return;
240 }
241 }
242
243 /* Check that all of the qualifiers match between stages.
244 */
245
246 /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
247 * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
248 * conformance test suite does not verify that the qualifiers must match.
249 * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
250 * OpenGLES 3.0 drivers, so we relax the checking in all cases.
251 */
252 if (false /* always skip the centroid check */ &&
253 prog->data->Version < (prog->IsES ? 310 : 430) &&
254 input->data.centroid != output->data.centroid) {
255 linker_error(prog,
256 "%s shader output `%s' %s centroid qualifier, "
257 "but %s shader input %s centroid qualifier\n",
258 _mesa_shader_stage_to_string(producer_stage),
259 output->name,
260 (output->data.centroid) ? "has" : "lacks",
261 _mesa_shader_stage_to_string(consumer_stage),
262 (input->data.centroid) ? "has" : "lacks");
263 return;
264 }
265
266 if (input->data.sample != output->data.sample) {
267 linker_error(prog,
268 "%s shader output `%s' %s sample qualifier, "
269 "but %s shader input %s sample qualifier\n",
270 _mesa_shader_stage_to_string(producer_stage),
271 output->name,
272 (output->data.sample) ? "has" : "lacks",
273 _mesa_shader_stage_to_string(consumer_stage),
274 (input->data.sample) ? "has" : "lacks");
275 return;
276 }
277
278 if (input->data.patch != output->data.patch) {
279 linker_error(prog,
280 "%s shader output `%s' %s patch qualifier, "
281 "but %s shader input %s patch qualifier\n",
282 _mesa_shader_stage_to_string(producer_stage),
283 output->name,
284 (output->data.patch) ? "has" : "lacks",
285 _mesa_shader_stage_to_string(consumer_stage),
286 (input->data.patch) ? "has" : "lacks");
287 return;
288 }
289
290 /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
291 *
292 * "As only outputs need be declared with invariant, an output from
293 * one shader stage will still match an input of a subsequent stage
294 * without the input being declared as invariant."
295 *
296 * while GLSL 4.20 says:
297 *
298 * "For variables leaving one shader and coming into another shader,
299 * the invariant keyword has to be used in both shaders, or a link
300 * error will result."
301 *
302 * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
303 *
304 * "The invariance of varyings that are declared in both the vertex
305 * and fragment shaders must match."
306 */
307 if (input->data.invariant != output->data.invariant &&
308 prog->data->Version < (prog->IsES ? 300 : 430)) {
309 linker_error(prog,
310 "%s shader output `%s' %s invariant qualifier, "
311 "but %s shader input %s invariant qualifier\n",
312 _mesa_shader_stage_to_string(producer_stage),
313 output->name,
314 (output->data.invariant) ? "has" : "lacks",
315 _mesa_shader_stage_to_string(consumer_stage),
316 (input->data.invariant) ? "has" : "lacks");
317 return;
318 }
319
320 /* GLSL >= 4.40 removes text requiring interpolation qualifiers
321 * to match cross stage, they must only match within the same stage.
322 *
323 * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
324 *
325 * "It is a link-time error if, within the same stage, the interpolation
326 * qualifiers of variables of the same name do not match."
327 *
328 * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
329 *
330 * "When no interpolation qualifier is present, smooth interpolation
331 * is used."
332 *
333 * So we match variables where one is smooth and the other has no explicit
334 * qualifier.
335 */
336 unsigned input_interpolation = input->data.interpolation;
337 unsigned output_interpolation = output->data.interpolation;
338 if (prog->IsES) {
339 if (input_interpolation == INTERP_MODE_NONE)
340 input_interpolation = INTERP_MODE_SMOOTH;
341 if (output_interpolation == INTERP_MODE_NONE)
342 output_interpolation = INTERP_MODE_SMOOTH;
343 }
344 if (input_interpolation != output_interpolation &&
345 prog->data->Version < 440) {
346 linker_error(prog,
347 "%s shader output `%s' specifies %s "
348 "interpolation qualifier, "
349 "but %s shader input specifies %s "
350 "interpolation qualifier\n",
351 _mesa_shader_stage_to_string(producer_stage),
352 output->name,
353 interpolation_string(output->data.interpolation),
354 _mesa_shader_stage_to_string(consumer_stage),
355 interpolation_string(input->data.interpolation));
356 return;
357 }
358 }
359
360 /**
361 * Validate front and back color outputs against single color input
362 */
363 static void
364 cross_validate_front_and_back_color(struct gl_shader_program *prog,
365 const ir_variable *input,
366 const ir_variable *front_color,
367 const ir_variable *back_color,
368 gl_shader_stage consumer_stage,
369 gl_shader_stage producer_stage)
370 {
371 if (front_color != NULL && front_color->data.assigned)
372 cross_validate_types_and_qualifiers(prog, input, front_color,
373 consumer_stage, producer_stage);
374
375 if (back_color != NULL && back_color->data.assigned)
376 cross_validate_types_and_qualifiers(prog, input, back_color,
377 consumer_stage, producer_stage);
378 }
379
380 /**
381 * Validate that outputs from one stage match inputs of another
382 */
383 void
384 cross_validate_outputs_to_inputs(struct gl_shader_program *prog,
385 gl_linked_shader *producer,
386 gl_linked_shader *consumer)
387 {
388 glsl_symbol_table parameters;
389 ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
390 { {NULL, NULL} };
391
392 /* Find all shader outputs in the "producer" stage.
393 */
394 foreach_in_list(ir_instruction, node, producer->ir) {
395 ir_variable *const var = node->as_variable();
396
397 if (var == NULL || var->data.mode != ir_var_shader_out)
398 continue;
399
400 if (!var->data.explicit_location
401 || var->data.location < VARYING_SLOT_VAR0)
402 parameters.add_variable(var);
403 else {
404 /* User-defined varyings with explicit locations are handled
405 * differently because they do not need to have matching names.
406 */
407 const glsl_type *type = get_varying_type(var, producer->Stage);
408 unsigned num_elements = type->count_attribute_slots(false);
409 unsigned idx = var->data.location - VARYING_SLOT_VAR0;
410 unsigned slot_limit = idx + num_elements;
411 unsigned last_comp;
412
413 if (type->without_array()->is_record()) {
414 /* The component qualifier can't be used on structs so just treat
415 * all component slots as used.
416 */
417 last_comp = 4;
418 } else {
419 unsigned dmul = type->without_array()->is_64bit() ? 2 : 1;
420 last_comp = var->data.location_frac +
421 type->without_array()->vector_elements * dmul;
422 }
423
424 while (idx < slot_limit) {
425 unsigned i = var->data.location_frac;
426 while (i < last_comp) {
427 if (explicit_locations[idx][i] != NULL) {
428 linker_error(prog,
429 "%s shader has multiple outputs explicitly "
430 "assigned to location %d and component %d\n",
431 _mesa_shader_stage_to_string(producer->Stage),
432 idx, var->data.location_frac);
433 return;
434 }
435
436 /* Make sure all components at this location have the same type.
437 */
438 for (unsigned j = 0; j < 4; j++) {
439 if (explicit_locations[idx][j] &&
440 (explicit_locations[idx][j]->type->without_array()
441 ->base_type != type->without_array()->base_type)) {
442 linker_error(prog,
443 "Varyings sharing the same location must "
444 "have the same underlying numerical type. "
445 "Location %u component %u\n", idx,
446 var->data.location_frac);
447 return;
448 }
449 }
450
451 explicit_locations[idx][i] = var;
452 i++;
453
454 /* We need to do some special handling for doubles as dvec3 and
455 * dvec4 consume two consecutive locations. We don't need to
456 * worry about components beginning at anything other than 0 as
457 * the spec does not allow this for dvec3 and dvec4.
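 *
 * Illustrative example: a dvec4 output gives last_comp = 0 + 4 * 2 = 8, so
 * the enclosing loop marks components 0..3 of this slot and then components
 * 0..3 of the following slot.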
458 */
459 if (i == 4 && last_comp > 4) {
460 last_comp = last_comp - 4;
461 /* Bump location index and reset the component index */
462 idx++;
463 i = 0;
464 }
465 }
466 idx++;
467 }
468 }
469 }
470
471
472 /* Find all shader inputs in the "consumer" stage. Any variables that have
473 * matching outputs already in the symbol table must have the same type and
474 * qualifiers.
475 *
476 * Exception: if the consumer is the geometry shader, then the inputs
477 * should be arrays and the type of the array element should match the type
478 * of the corresponding producer output.
479 */
480 foreach_in_list(ir_instruction, node, consumer->ir) {
481 ir_variable *const input = node->as_variable();
482
483 if (input == NULL || input->data.mode != ir_var_shader_in)
484 continue;
485
486 if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
487 const ir_variable *const front_color =
488 parameters.get_variable("gl_FrontColor");
489
490 const ir_variable *const back_color =
491 parameters.get_variable("gl_BackColor");
492
493 cross_validate_front_and_back_color(prog, input,
494 front_color, back_color,
495 consumer->Stage, producer->Stage);
496 } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
497 const ir_variable *const front_color =
498 parameters.get_variable("gl_FrontSecondaryColor");
499
500 const ir_variable *const back_color =
501 parameters.get_variable("gl_BackSecondaryColor");
502
503 cross_validate_front_and_back_color(prog, input,
504 front_color, back_color,
505 consumer->Stage, producer->Stage);
506 } else {
507 /* The rules for connecting inputs and outputs change in the presence
508 * of explicit locations. In this case, we no longer care about the
509 * names of the variables. Instead, we care only about the
510 * explicitly assigned location.
511 */
512 ir_variable *output = NULL;
513 if (input->data.explicit_location
514 && input->data.location >= VARYING_SLOT_VAR0) {
515
516 const glsl_type *type = get_varying_type(input, consumer->Stage);
517 unsigned num_elements = type->count_attribute_slots(false);
518 unsigned idx = input->data.location - VARYING_SLOT_VAR0;
519 unsigned slot_limit = idx + num_elements;
520
521 while (idx < slot_limit) {
522 output = explicit_locations[idx][input->data.location_frac];
523
524 if (output == NULL ||
525 input->data.location != output->data.location) {
526 linker_error(prog,
527 "%s shader input `%s' with explicit location "
528 "has no matching output\n",
529 _mesa_shader_stage_to_string(consumer->Stage),
530 input->name);
531 break;
532 }
533 idx++;
534 }
535 } else {
536 output = parameters.get_variable(input->name);
537 }
538
539 if (output != NULL) {
540 /* Interface blocks have their own validation elsewhere so don't
541 * try validating them here.
542 */
543 if (!(input->get_interface_type() &&
544 output->get_interface_type()))
545 cross_validate_types_and_qualifiers(prog, input, output,
546 consumer->Stage,
547 producer->Stage);
548 } else {
549 /* Check for input vars with unmatched output vars in prev stage
550 * taking into account that interface blocks could have a matching
551 * output but with a different name, so we ignore them.
552 */
553 assert(!input->data.assigned);
554 if (input->data.used && !input->get_interface_type() &&
555 !input->data.explicit_location && !prog->SeparateShader)
556 linker_error(prog,
557 "%s shader input `%s' "
558 "has no matching output in the previous stage\n",
559 _mesa_shader_stage_to_string(consumer->Stage),
560 input->name);
561 }
562 }
563 }
564 }
565
566 /**
567 * Demote shader inputs and outputs that are not used in other stages, and
568 * remove them via dead code elimination.
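 *
 * Variables captured only by transform feedback (is_xfb_only) are left
 * untouched, since they must remain outputs.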
569 */
570 static void
571 remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
572 gl_linked_shader *sh,
573 enum ir_variable_mode mode)
574 {
575 if (is_separate_shader_object)
576 return;
577
578 foreach_in_list(ir_instruction, node, sh->ir) {
579 ir_variable *const var = node->as_variable();
580
581 if (var == NULL || var->data.mode != int(mode))
582 continue;
583
584 /* A shader 'in' or 'out' variable is only really an input or output if
585 * its value is used by other shader stages. This will cause the
586 * variable to have a location assigned.
587 */
588 if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
589 assert(var->data.mode != ir_var_temporary);
590
591 /* Assign zeros to demoted inputs to allow more optimizations. */
592 if (var->data.mode == ir_var_shader_in && !var->constant_value)
593 var->constant_value = ir_constant::zero(var, var->type);
594
595 var->data.mode = ir_var_auto;
596 }
597 }
598
599 /* Eliminate code that is now dead due to unused inputs/outputs being
600 * demoted.
601 */
602 while (do_dead_code(sh->ir, false))
603 ;
604
605 }
606
607 /**
608 * Initialize this object based on a string that was passed to
609 * glTransformFeedbackVaryings.
610 *
611 * If the input is malformed, this call still succeeds, but it sets
612 * this->var_name to a malformed input, so tfeedback_decl::find_candidate()
613 * will fail to find any matching variable.
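 *
 * A few illustrative inputs (not from this file): "gl_NextBuffer" sets
 * next_buffer_separator, "gl_SkipComponents2" sets skip_components to 2, and
 * "foo[3]" yields var_name "foo" with array_subscript 3.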
614 */
615 void
616 tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
617 const char *input)
618 {
619 /* We don't have to be pedantic about what is a valid GLSL variable name,
620 * because any variable with an invalid name can't exist in the IR anyway.
621 */
622
623 this->location = -1;
624 this->orig_name = input;
625 this->lowered_builtin_array_variable = none;
626 this->skip_components = 0;
627 this->next_buffer_separator = false;
628 this->matched_candidate = NULL;
629 this->stream_id = 0;
630 this->buffer = 0;
631 this->offset = 0;
632
633 if (ctx->Extensions.ARB_transform_feedback3) {
634 /* Parse gl_NextBuffer. */
635 if (strcmp(input, "gl_NextBuffer") == 0) {
636 this->next_buffer_separator = true;
637 return;
638 }
639
640 /* Parse gl_SkipComponents. */
641 if (strcmp(input, "gl_SkipComponents1") == 0)
642 this->skip_components = 1;
643 else if (strcmp(input, "gl_SkipComponents2") == 0)
644 this->skip_components = 2;
645 else if (strcmp(input, "gl_SkipComponents3") == 0)
646 this->skip_components = 3;
647 else if (strcmp(input, "gl_SkipComponents4") == 0)
648 this->skip_components = 4;
649
650 if (this->skip_components)
651 return;
652 }
653
654 /* Parse a declaration. */
655 const char *base_name_end;
656 long subscript = parse_program_resource_name(input, &base_name_end);
657 this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
658 if (this->var_name == NULL) {
659 _mesa_error_no_memory(__func__);
660 return;
661 }
662
663 if (subscript >= 0) {
664 this->array_subscript = subscript;
665 this->is_subscripted = true;
666 } else {
667 this->is_subscripted = false;
668 }
669
670 /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
671 * class must behave specially to account for the fact that gl_ClipDistance
672 * is converted from a float[8] to a vec4[2].
673 */
674 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
675 strcmp(this->var_name, "gl_ClipDistance") == 0) {
676 this->lowered_builtin_array_variable = clip_distance;
677 }
678 if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
679 strcmp(this->var_name, "gl_CullDistance") == 0) {
680 this->lowered_builtin_array_variable = cull_distance;
681 }
682
683 if (ctx->Const.LowerTessLevel &&
684 (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
685 this->lowered_builtin_array_variable = tess_level_outer;
686 if (ctx->Const.LowerTessLevel &&
687 (strcmp(this->var_name, "gl_TessLevelInner") == 0))
688 this->lowered_builtin_array_variable = tess_level_inner;
689 }
690
691
692 /**
693 * Determine whether two tfeedback_decl objects refer to the same variable and
694 * array index (if applicable).
695 */
696 bool
697 tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
698 {
699 assert(x.is_varying() && y.is_varying());
700
701 if (strcmp(x.var_name, y.var_name) != 0)
702 return false;
703 if (x.is_subscripted != y.is_subscripted)
704 return false;
705 if (x.is_subscripted && x.array_subscript != y.array_subscript)
706 return false;
707 return true;
708 }
709
710
711 /**
712 * Assign a location and stream ID for this tfeedback_decl object based on the
713 * transform feedback candidate found by find_candidate.
714 *
715 * If an error occurs, the error is reported through linker_error() and false
716 * is returned.
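 *
 * Worked example with made-up numbers: a non-array candidate whose top-level
 * variable sits at VARYING_SLOT_VAR1 with location_frac 1 and offset 0 gets
 * fine_location = (VARYING_SLOT_VAR0 + 1) * 4 + 1, so this->location becomes
 * VARYING_SLOT_VAR1 and this->location_frac becomes 1.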
717 */
718 bool
719 tfeedback_decl::assign_location(struct gl_context *ctx,
720 struct gl_shader_program *prog)
721 {
722 assert(this->is_varying());
723
724 unsigned fine_location
725 = this->matched_candidate->toplevel_var->data.location * 4
726 + this->matched_candidate->toplevel_var->data.location_frac
727 + this->matched_candidate->offset;
728 const unsigned dmul =
729 this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
730
731 if (this->matched_candidate->type->is_array()) {
732 /* Array variable */
733 const unsigned matrix_cols =
734 this->matched_candidate->type->fields.array->matrix_columns;
735 const unsigned vector_elements =
736 this->matched_candidate->type->fields.array->vector_elements;
737 unsigned actual_array_size;
738 switch (this->lowered_builtin_array_variable) {
739 case clip_distance:
740 actual_array_size = prog->last_vert_prog ?
741 prog->last_vert_prog->info.clip_distance_array_size : 0;
742 break;
743 case cull_distance:
744 actual_array_size = prog->last_vert_prog ?
745 prog->last_vert_prog->info.cull_distance_array_size : 0;
746 break;
747 case tess_level_outer:
748 actual_array_size = 4;
749 break;
750 case tess_level_inner:
751 actual_array_size = 2;
752 break;
753 case none:
754 default:
755 actual_array_size = this->matched_candidate->type->array_size();
756 break;
757 }
758
759 if (this->is_subscripted) {
760 /* Check array bounds. */
761 if (this->array_subscript >= actual_array_size) {
762 linker_error(prog, "Transform feedback varying %s has index "
763 "%i, but the array size is %u.",
764 this->orig_name, this->array_subscript,
765 actual_array_size);
766 return false;
767 }
768 unsigned array_elem_size = this->lowered_builtin_array_variable ?
769 1 : vector_elements * matrix_cols * dmul;
770 fine_location += array_elem_size * this->array_subscript;
771 this->size = 1;
772 } else {
773 this->size = actual_array_size;
774 }
775 this->vector_elements = vector_elements;
776 this->matrix_columns = matrix_cols;
777 if (this->lowered_builtin_array_variable)
778 this->type = GL_FLOAT;
779 else
780 this->type = this->matched_candidate->type->fields.array->gl_type;
781 } else {
782 /* Regular variable (scalar, vector, or matrix) */
783 if (this->is_subscripted) {
784 linker_error(prog, "Transform feedback varying %s requested, "
785 "but %s is not an array.",
786 this->orig_name, this->var_name);
787 return false;
788 }
789 this->size = 1;
790 this->vector_elements = this->matched_candidate->type->vector_elements;
791 this->matrix_columns = this->matched_candidate->type->matrix_columns;
792 this->type = this->matched_candidate->type->gl_type;
793 }
794 this->location = fine_location / 4;
795 this->location_frac = fine_location % 4;
796
797 /* From GL_EXT_transform_feedback:
798 * A program will fail to link if:
799 *
800 * * the total number of components to capture in any varying
801 * variable in <varyings> is greater than the constant
802 * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
803 * buffer mode is SEPARATE_ATTRIBS_EXT;
804 */
805 if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
806 this->num_components() >
807 ctx->Const.MaxTransformFeedbackSeparateComponents) {
808 linker_error(prog, "Transform feedback varying %s exceeds "
809 "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
810 this->orig_name);
811 return false;
812 }
813
814 /* Only transform feedback varyings can be assigned to non-zero streams,
815 * so assign the stream id here.
816 */
817 this->stream_id = this->matched_candidate->toplevel_var->data.stream;
818
819 unsigned array_offset = this->array_subscript * 4 * dmul;
820 unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
821 this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
822 this->offset = this->matched_candidate->toplevel_var->data.offset +
823 array_offset + struct_offset;
824
825 return true;
826 }
827
828
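/**
 * Number of transform feedback output slots needed for this declaration.
 * Illustrative example: a vec3 starting at location_frac 2 needs
 * (3 + 2 + 3) / 4 = 2 slots.
 */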
829 unsigned
830 tfeedback_decl::get_num_outputs() const
831 {
832 if (!this->is_varying()) {
833 return 0;
834 }
835 return (this->num_components() + this->location_frac + 3)/4;
836 }
837
838
839 /**
840 * Update gl_transform_feedback_info to reflect this tfeedback_decl.
841 *
842 * If an error occurs, the error is reported through linker_error() and false
843 * is returned.
844 */
845 bool
846 tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
847 struct gl_transform_feedback_info *info,
848 unsigned buffer, unsigned buffer_index,
849 const unsigned max_outputs, bool *explicit_stride,
850 bool has_xfb_qualifiers) const
851 {
852 unsigned xfb_offset = 0;
853 unsigned size = this->size;
854 /* Handle gl_SkipComponents. */
855 if (this->skip_components) {
856 info->Buffers[buffer].Stride += this->skip_components;
857 size = this->skip_components;
858 goto store_varying;
859 }
860
861 if (this->next_buffer_separator) {
862 size = 0;
863 goto store_varying;
864 }
865
866 if (has_xfb_qualifiers) {
867 xfb_offset = this->offset / 4;
868 } else {
869 xfb_offset = info->Buffers[buffer].Stride;
870 }
871 info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
872
873 {
874 unsigned location = this->location;
875 unsigned location_frac = this->location_frac;
876 unsigned num_components = this->num_components();
877 while (num_components > 0) {
878 unsigned output_size = MIN2(num_components, 4 - location_frac);
879 assert((info->NumOutputs == 0 && max_outputs == 0) ||
880 info->NumOutputs < max_outputs);
881
882 /* From the ARB_enhanced_layouts spec:
883 *
884 * "If such a block member or variable is not written during a shader
885 * invocation, the buffer contents at the assigned offset will be
886 * undefined. Even if there are no static writes to a variable or
887 * member that is assigned a transform feedback offset, the space is
888 * still allocated in the buffer and still affects the stride."
889 */
890 if (this->is_varying_written()) {
891 info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
892 info->Outputs[info->NumOutputs].OutputRegister = location;
893 info->Outputs[info->NumOutputs].NumComponents = output_size;
894 info->Outputs[info->NumOutputs].StreamId = stream_id;
895 info->Outputs[info->NumOutputs].OutputBuffer = buffer;
896 info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
897 ++info->NumOutputs;
898 }
899 info->Buffers[buffer].Stream = this->stream_id;
900 xfb_offset += output_size;
901
902 num_components -= output_size;
903 location++;
904 location_frac = 0;
905 }
906 }
907
908 if (explicit_stride && explicit_stride[buffer]) {
909 if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
910 linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
911 "multiple of 8 as its applied to a type that is or "
912 "contains a double.",
913 info->Buffers[buffer].Stride * 4);
914 return false;
915 }
916
917 if ((this->offset / 4) / info->Buffers[buffer].Stride !=
918 (xfb_offset - 1) / info->Buffers[buffer].Stride) {
919 linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
920 "buffer (%d)", xfb_offset * 4,
921 info->Buffers[buffer].Stride * 4, buffer);
922 return false;
923 }
924 } else {
925 info->Buffers[buffer].Stride = xfb_offset;
926 }
927
928 /* From GL_EXT_transform_feedback:
929 * A program will fail to link if:
930 *
931 * * the total number of components to capture is greater than
932 * the constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
933 * and the buffer mode is INTERLEAVED_ATTRIBS_EXT.
934 *
935 * From GL_ARB_enhanced_layouts:
936 *
937 * "The resulting stride (implicit or explicit) must be less than or
938 * equal to the implementation-dependent constant
939 * gl_MaxTransformFeedbackInterleavedComponents."
940 */
941 if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
942 has_xfb_qualifiers) &&
943 info->Buffers[buffer].Stride >
944 ctx->Const.MaxTransformFeedbackInterleavedComponents) {
945 linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
946 "limit has been exceeded.");
947 return false;
948 }
949
950 store_varying:
951 info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
952 this->orig_name);
953 info->Varyings[info->NumVarying].Type = this->type;
954 info->Varyings[info->NumVarying].Size = size;
955 info->Varyings[info->NumVarying].BufferIndex = buffer_index;
956 info->NumVarying++;
957 info->Buffers[buffer].NumVaryings++;
958
959 return true;
960 }
961
962
963 const tfeedback_candidate *
964 tfeedback_decl::find_candidate(gl_shader_program *prog,
965 hash_table *tfeedback_candidates)
966 {
967 const char *name = this->var_name;
968 switch (this->lowered_builtin_array_variable) {
969 case none:
970 name = this->var_name;
971 break;
972 case clip_distance:
973 name = "gl_ClipDistanceMESA";
974 break;
975 case cull_distance:
976 name = "gl_CullDistanceMESA";
977 break;
978 case tess_level_outer:
979 name = "gl_TessLevelOuterMESA";
980 break;
981 case tess_level_inner:
982 name = "gl_TessLevelInnerMESA";
983 break;
984 }
985 hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
986
987 this->matched_candidate = entry ?
988 (const tfeedback_candidate *) entry->data : NULL;
989
990 if (!this->matched_candidate) {
991 /* From GL_EXT_transform_feedback:
992 * A program will fail to link if:
993 *
994 * * any variable name specified in the <varyings> array is not
995 * declared as an output in the geometry shader (if present) or
996 * the vertex shader (if no geometry shader is present);
997 */
998 linker_error(prog, "Transform feedback varying %s undeclared.",
999 this->orig_name);
1000 }
1001
1002 return this->matched_candidate;
1003 }
1004
1005
1006 /**
1007 * Parse all the transform feedback declarations that were passed to
1008 * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
1009 *
1010 * If an error occurs, the error is reported through linker_error() and false
1011 * is returned.
1012 */
1013 static bool
1014 parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
1015 const void *mem_ctx, unsigned num_names,
1016 char **varying_names, tfeedback_decl *decls)
1017 {
1018 for (unsigned i = 0; i < num_names; ++i) {
1019 decls[i].init(ctx, mem_ctx, varying_names[i]);
1020
1021 if (!decls[i].is_varying())
1022 continue;
1023
1024 /* From GL_EXT_transform_feedback:
1025 * A program will fail to link if:
1026 *
1027 * * any two entries in the <varyings> array specify the same varying
1028 * variable;
1029 *
1030 * We interpret this to mean "any two entries in the <varyings> array
1031 * specify the same varying variable and array index", since transform
1032 * feedback of arrays would be useless otherwise.
1033 */
1034 for (unsigned j = 0; j < i; ++j) {
1035 if (!decls[j].is_varying())
1036 continue;
1037
1038 if (tfeedback_decl::is_same(decls[i], decls[j])) {
1039 linker_error(prog, "Transform feedback varying %s specified "
1040 "more than once.", varying_names[i]);
1041 return false;
1042 }
1043 }
1044 }
1045 return true;
1046 }
1047
1048
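/**
 * qsort() comparator used when xfb qualifiers are present: order the
 * declarations by buffer first, then by xfb_offset within each buffer.
 */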
1049 static int
1050 cmp_xfb_offset(const void * x_generic, const void * y_generic)
1051 {
1052 tfeedback_decl *x = (tfeedback_decl *) x_generic;
1053 tfeedback_decl *y = (tfeedback_decl *) y_generic;
1054
1055 if (x->get_buffer() != y->get_buffer())
1056 return x->get_buffer() - y->get_buffer();
1057 return x->get_offset() - y->get_offset();
1058 }
1059
1060 /**
1061 * Store transform feedback location assignments into
1062 * prog->sh.LinkedTransformFeedback based on the data stored in
1063 * tfeedback_decls.
1064 *
1065 * If an error occurs, the error is reported through linker_error() and false
1066 * is returned.
1067 */
1068 static bool
1069 store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
1070 unsigned num_tfeedback_decls,
1071 tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers)
1072 {
1073 if (!prog->last_vert_prog)
1074 return true;
1075
1076 /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask for
1077 * tracking the number of buffers doesn't overflow.
1078 */
1079 assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
1080
1081 bool separate_attribs_mode =
1082 prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
1083
1084 struct gl_program *xfb_prog = prog->last_vert_prog;
1085 xfb_prog->sh.LinkedTransformFeedback =
1086 rzalloc(xfb_prog, struct gl_transform_feedback_info);
1087
1088 /* The xfb_offset qualifier does not have to be used in increasing order,
1089 * however some drivers expect to receive the list of transform feedback
1090 * declarations in order, so sort it now for convenience.
1091 */
1092 if (has_xfb_qualifiers)
1093 qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
1094 cmp_xfb_offset);
1095
1096 xfb_prog->sh.LinkedTransformFeedback->Varyings =
1097 rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
1098 num_tfeedback_decls);
1099
1100 unsigned num_outputs = 0;
1101 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1102 if (tfeedback_decls[i].is_varying_written())
1103 num_outputs += tfeedback_decls[i].get_num_outputs();
1104 }
1105
1106 xfb_prog->sh.LinkedTransformFeedback->Outputs =
1107 rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
1108 num_outputs);
1109
1110 unsigned num_buffers = 0;
1111 unsigned buffers = 0;
1112
1113 if (!has_xfb_qualifiers && separate_attribs_mode) {
1114 /* GL_SEPARATE_ATTRIBS */
1115 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1116 if (!tfeedback_decls[i].store(ctx, prog,
1117 xfb_prog->sh.LinkedTransformFeedback,
1118 num_buffers, num_buffers, num_outputs,
1119 NULL, has_xfb_qualifiers))
1120 return false;
1121
1122 buffers |= 1 << num_buffers;
1123 num_buffers++;
1124 }
1125 }
1126 else {
1127 /* GL_INTERLEAVED_ATTRIBS */
1128 int buffer_stream_id = -1;
1129 unsigned buffer =
1130 num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
1131 bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
1132
1133 /* Apply any xfb_stride global qualifiers */
1134 if (has_xfb_qualifiers) {
1135 for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
1136 if (prog->TransformFeedback.BufferStride[j]) {
1137 buffers |= 1 << j;
1138 explicit_stride[j] = true;
1139 xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
1140 prog->TransformFeedback.BufferStride[j] / 4;
1141 }
1142 }
1143 }
1144
1145 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
1146 if (has_xfb_qualifiers &&
1147 buffer != tfeedback_decls[i].get_buffer()) {
1148 /* we have moved to the next buffer so reset stream id */
1149 buffer_stream_id = -1;
1150 num_buffers++;
1151 }
1152
1153 if (tfeedback_decls[i].is_next_buffer_separator()) {
1154 if (!tfeedback_decls[i].store(ctx, prog,
1155 xfb_prog->sh.LinkedTransformFeedback,
1156 buffer, num_buffers, num_outputs,
1157 explicit_stride, has_xfb_qualifiers))
1158 return false;
1159 num_buffers++;
1160 buffer_stream_id = -1;
1161 continue;
1162 } else if (tfeedback_decls[i].is_varying()) {
1163 if (buffer_stream_id == -1) {
1164 /* First varying writing to this buffer: remember its stream */
1165 buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
1166 } else if (buffer_stream_id !=
1167 (int) tfeedback_decls[i].get_stream_id()) {
1168 /* Varying writes to the same buffer from a different stream */
1169 linker_error(prog,
1170 "Transform feedback can't capture varyings belonging "
1171 "to different vertex streams in a single buffer. "
1172 "Varying %s writes to buffer from stream %u, other "
1173 "varyings in the same buffer write from stream %u.",
1174 tfeedback_decls[i].name(),
1175 tfeedback_decls[i].get_stream_id(),
1176 buffer_stream_id);
1177 return false;
1178 }
1179 }
1180
1181 if (has_xfb_qualifiers) {
1182 buffer = tfeedback_decls[i].get_buffer();
1183 } else {
1184 buffer = num_buffers;
1185 }
1186 buffers |= 1 << buffer;
1187
1188 if (!tfeedback_decls[i].store(ctx, prog,
1189 xfb_prog->sh.LinkedTransformFeedback,
1190 buffer, num_buffers, num_outputs,
1191 explicit_stride, has_xfb_qualifiers))
1192 return false;
1193 }
1194 }
1195
1196 assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
1197
1198 xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
1199 return true;
1200 }
1201
1202 namespace {
1203
1204 /**
1205 * Data structure recording the relationship between outputs of one shader
1206 * stage (the "producer") and inputs of another (the "consumer").
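 *
 * Typical use, as suggested by the member functions below: record() each
 * producer/consumer pair, assign_locations() to pack them, then
 * store_locations() to write the assignments back into the IR variables.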
1207 */
1208 class varying_matches
1209 {
1210 public:
1211 varying_matches(bool disable_varying_packing, bool xfb_enabled,
1212 bool enhanced_layouts_enabled,
1213 gl_shader_stage producer_stage,
1214 gl_shader_stage consumer_stage);
1215 ~varying_matches();
1216 void record(ir_variable *producer_var, ir_variable *consumer_var);
1217 unsigned assign_locations(struct gl_shader_program *prog,
1218 uint8_t *components,
1219 uint64_t reserved_slots);
1220 void store_locations() const;
1221
1222 private:
1223 bool is_varying_packing_safe(const glsl_type *type,
1224 const ir_variable *var);
1225
1226 /**
1227 * If true, this driver disables varying packing, so all varyings need to
1228 * be aligned on slot boundaries, and take up a number of slots equal to
1229 * their number of matrix columns times their array size.
1230 *
1231 * Packing may also be disabled because our current packing method is not
1232 * safe in SSO or versions of OpenGL where interpolation qualifiers are not
1233 * guaranteed to match across stages.
1234 */
1235 const bool disable_varying_packing;
1236
1237 /**
1238 * If true, this driver has transform feedback enabled. The transform
1239 * feedback code requires at least some packing be done even when varying
1240 * packing is disabled, fortunately where transform feedback requires
1241 * packing it's safe to override the disabled setting. See
1242 * is_varying_packing_safe().
1243 */
1244 const bool xfb_enabled;
1245
1246 const bool enhanced_layouts_enabled;
1247
1248 /**
1249 * Enum representing the order in which varyings are packed within a
1250 * packing class.
1251 *
1252 * Currently we pack vec4's first, then vec2's, then scalar values, then
1253 * vec3's. This order ensures that the only vectors that are at risk of
1254 * having to be "double parked" (split between two adjacent varying slots)
1255 * are the vec3's.
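 *
 * For instance (illustrative): packing vec2, vec2, float, vec3 in this order
 * fills the first slot with the two vec2's; the float then starts a new slot
 * at component 0, leaving components 1-3 free so the vec3 is not split.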
1256 */
1257 enum packing_order_enum {
1258 PACKING_ORDER_VEC4,
1259 PACKING_ORDER_VEC2,
1260 PACKING_ORDER_SCALAR,
1261 PACKING_ORDER_VEC3,
1262 };
1263
1264 static unsigned compute_packing_class(const ir_variable *var);
1265 static packing_order_enum compute_packing_order(const ir_variable *var);
1266 static int match_comparator(const void *x_generic, const void *y_generic);
1267 static int xfb_comparator(const void *x_generic, const void *y_generic);
1268
1269 /**
1270 * Structure recording the relationship between a single producer output
1271 * and a single consumer input.
1272 */
1273 struct match {
1274 /**
1275 * Packing class for this varying, computed by compute_packing_class().
1276 */
1277 unsigned packing_class;
1278
1279 /**
1280 * Packing order for this varying, computed by compute_packing_order().
1281 */
1282 packing_order_enum packing_order;
1283 unsigned num_components;
1284
1285 /**
1286 * The output variable in the producer stage.
1287 */
1288 ir_variable *producer_var;
1289
1290 /**
1291 * The input variable in the consumer stage.
1292 */
1293 ir_variable *consumer_var;
1294
1295 /**
1296 * The location which has been assigned for this varying. This is
1297 * expressed in multiples of a float, with the first generic varying
1298 * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
1299 * value 0.
1300 */
1301 unsigned generic_location;
1302 } *matches;
1303
1304 /**
1305 * The number of elements in the \c matches array that are currently in
1306 * use.
1307 */
1308 unsigned num_matches;
1309
1310 /**
1311 * The number of elements that were set aside for the \c matches array when
1312 * it was allocated.
1313 */
1314 unsigned matches_capacity;
1315
1316 gl_shader_stage producer_stage;
1317 gl_shader_stage consumer_stage;
1318 };
1319
1320 } /* anonymous namespace */
1321
1322 varying_matches::varying_matches(bool disable_varying_packing,
1323 bool xfb_enabled,
1324 bool enhanced_layouts_enabled,
1325 gl_shader_stage producer_stage,
1326 gl_shader_stage consumer_stage)
1327 : disable_varying_packing(disable_varying_packing),
1328 xfb_enabled(xfb_enabled),
1329 enhanced_layouts_enabled(enhanced_layouts_enabled),
1330 producer_stage(producer_stage),
1331 consumer_stage(consumer_stage)
1332 {
1333 /* Note: this initial capacity is rather arbitrarily chosen to be large
1334 * enough for many cases without wasting an unreasonable amount of space.
1335 * varying_matches::record() will resize the array if there are more than
1336 * this number of varyings.
1337 */
1338 this->matches_capacity = 8;
1339 this->matches = (match *)
1340 malloc(sizeof(*this->matches) * this->matches_capacity);
1341 this->num_matches = 0;
1342 }
1343
1344
1345 varying_matches::~varying_matches()
1346 {
1347 free(this->matches);
1348 }
1349
1350
1351 /**
1352 * Packing is always safe on individual arrays, structures, and matrices. It
1353 * is also safe if the varying is only used for transform feedback.
1354 */
1355 bool
1356 varying_matches::is_varying_packing_safe(const glsl_type *type,
1357 const ir_variable *var)
1358 {
1359 if (consumer_stage == MESA_SHADER_TESS_EVAL ||
1360 consumer_stage == MESA_SHADER_TESS_CTRL ||
1361 producer_stage == MESA_SHADER_TESS_CTRL)
1362 return false;
1363
1364 return xfb_enabled && (type->is_array() || type->is_record() ||
1365 type->is_matrix() || var->data.is_xfb_only);
1366 }
1367
1368
1369 /**
1370 * Record the given producer/consumer variable pair in the list of variables
1371 * that should later be assigned locations.
1372 *
1373 * It is permissible for \c consumer_var to be NULL (this happens if a
1374 * variable is output by the producer and consumed by transform feedback, but
1375 * not consumed by the consumer).
1376 *
1377 * If \c producer_var has already been paired up with a consumer_var, or
1378 * producer_var is part of fixed pipeline functionality (and hence already has
1379 * a location assigned), this function has no effect.
1380 *
1381 * Note: as a side effect this function may change the interpolation type of
1382 * \c producer_var, but only when the change couldn't possibly affect
1383 * rendering.
1384 */
1385 void
1386 varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
1387 {
1388 assert(producer_var != NULL || consumer_var != NULL);
1389
1390 if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
1391 producer_var->data.explicit_location)) ||
1392 (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
1393 consumer_var->data.explicit_location))) {
1394 /* Either a location already exists for this variable (since it is part
1395 * of fixed functionality), or it has already been recorded as part of a
1396 * previous match.
1397 */
1398 return;
1399 }
1400
1401 bool needs_flat_qualifier = consumer_var == NULL &&
1402 (producer_var->type->contains_integer() ||
1403 producer_var->type->contains_double());
1404
1405 if (!disable_varying_packing &&
1406 (needs_flat_qualifier ||
1407 (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
1408 /* Since this varying is not being consumed by the fragment shader, its
1409 * interpolation type cannot possibly affect rendering.
1410 * Also, this variable is non-flat and is (or contains) an integer
1411 * or a double.
1412 * If the consumer stage is unknown, don't modify the interpolation
1413 * type as it could affect rendering later with separate shaders.
1414 *
1415 * lower_packed_varyings requires all integer varyings to be flat,
1416 * regardless of where they appear. We can trivially satisfy that
1417 * requirement by changing the interpolation type to flat here.
1418 */
1419 if (producer_var) {
1420 producer_var->data.centroid = false;
1421 producer_var->data.sample = false;
1422 producer_var->data.interpolation = INTERP_MODE_FLAT;
1423 }
1424
1425 if (consumer_var) {
1426 consumer_var->data.centroid = false;
1427 consumer_var->data.sample = false;
1428 consumer_var->data.interpolation = INTERP_MODE_FLAT;
1429 }
1430 }
1431
1432 if (this->num_matches == this->matches_capacity) {
1433 this->matches_capacity *= 2;
1434 this->matches = (match *)
1435 realloc(this->matches,
1436 sizeof(*this->matches) * this->matches_capacity);
1437 }
1438
1439 /* We must use the consumer to compute the packing class because in GL4.4+
1440 * there is no guarantee interpolation qualifiers will match across stages.
1441 *
1442 * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
1443 *
1444 * "The type and presence of interpolation qualifiers of variables with
1445 * the same name declared in all linked shaders for the same cross-stage
1446 * interface must match, otherwise the link command will fail.
1447 *
1448 * When comparing an output from one stage to an input of a subsequent
1449 * stage, the input and output don't match if their interpolation
1450 * qualifiers (or lack thereof) are not the same."
1451 *
1452 * This text was also in at least revision 7 of the 4.40 spec but is no
1453 * longer in revision 9 and not in the 4.50 spec.
1454 */
1455 const ir_variable *const var = (consumer_var != NULL)
1456 ? consumer_var : producer_var;
1457 const gl_shader_stage stage = (consumer_var != NULL)
1458 ? consumer_stage : producer_stage;
1459 const glsl_type *type = get_varying_type(var, stage);
1460
1461 if (producer_var && consumer_var &&
1462 consumer_var->data.must_be_shader_input) {
1463 producer_var->data.must_be_shader_input = 1;
1464 }
1465
1466 this->matches[this->num_matches].packing_class
1467 = this->compute_packing_class(var);
1468 this->matches[this->num_matches].packing_order
1469 = this->compute_packing_order(var);
1470 if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
1471 var->data.must_be_shader_input) {
1472 unsigned slots = type->count_attribute_slots(false);
1473 this->matches[this->num_matches].num_components = slots * 4;
1474 } else {
1475 this->matches[this->num_matches].num_components
1476 = type->component_slots();
1477 }
1478
1479 this->matches[this->num_matches].producer_var = producer_var;
1480 this->matches[this->num_matches].consumer_var = consumer_var;
1481 this->num_matches++;
1482 if (producer_var)
1483 producer_var->data.is_unmatched_generic_inout = 0;
1484 if (consumer_var)
1485 consumer_var->data.is_unmatched_generic_inout = 0;
1486 }
1487
1488
1489 /**
1490 * Choose locations for all of the variable matches that were previously
1491 * passed to varying_matches::record().
1492 */
1493 unsigned
1494 varying_matches::assign_locations(struct gl_shader_program *prog,
1495 uint8_t *components,
1496 uint64_t reserved_slots)
1497 {
1498 /* If packing has been disabled then we cannot safely sort the varyings by
1499 * class as it may mean we are using a version of OpenGL where
1500 * interpolation qualifiers are not guaranteed to be matching across
1501 * shaders, sorting in this case could result in mismatching shader
1502 * interfaces.
1503 * When packing is disabled the sort orders varyings used by transform
1504 * feedback first, but also depends on *undefined behaviour* of qsort to
1505 * reverse the order of the varyings. See: xfb_comparator().
1506 */
1507 if (!this->disable_varying_packing) {
1508 /* Sort varying matches into an order that makes them easy to pack. */
1509 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1510 &varying_matches::match_comparator);
1511 } else {
1512 /* Only sort varyings that are only used by transform feedback. */
1513 qsort(this->matches, this->num_matches, sizeof(*this->matches),
1514 &varying_matches::xfb_comparator);
1515 }
1516
1517 unsigned generic_location = 0;
1518 unsigned generic_patch_location = MAX_VARYING*4;
1519 bool previous_var_xfb_only = false;
1520
1521 for (unsigned i = 0; i < this->num_matches; i++) {
1522 unsigned *location = &generic_location;
1523
1524 const ir_variable *var;
1525 const glsl_type *type;
1526 bool is_vertex_input = false;
1527 if (matches[i].consumer_var) {
1528 var = matches[i].consumer_var;
1529 type = get_varying_type(var, consumer_stage);
1530 if (consumer_stage == MESA_SHADER_VERTEX)
1531 is_vertex_input = true;
1532 } else {
1533 var = matches[i].producer_var;
1534 type = get_varying_type(var, producer_stage);
1535 }
1536
1537 if (var->data.patch)
1538 location = &generic_patch_location;
1539
1540 /* Advance to the next slot if this varying has a different packing
1541 * class than the previous one, and we're not already on a slot
1542 * boundary.
1543 *
1544 * Also advance to the next slot if packing is disabled. This makes sure
1545 * we don't assign varyings the same locations which is possible
1546 * because we still pack individual arrays, records and matrices even
1547 * when packing is disabled. Note we don't advance to the next slot if
1548 * we can pack varyings together that are only used for transform
1549 * feedback.
1550 */
1551 if (var->data.must_be_shader_input ||
1552 (this->disable_varying_packing &&
1553 !(previous_var_xfb_only && var->data.is_xfb_only)) ||
1554 (i > 0 && this->matches[i - 1].packing_class
1555 != this->matches[i].packing_class )) {
1556 *location = ALIGN(*location, 4);
1557 }
1558
1559 previous_var_xfb_only = var->data.is_xfb_only;
1560
1561 /* The number of components taken up by this variable. For vertex shader
1562 * inputs, we use the number of slots * 4, as they have different
1563 * counting rules.
1564 */
1565 unsigned num_components = is_vertex_input ?
1566 type->count_attribute_slots(is_vertex_input) * 4 :
1567 this->matches[i].num_components;
1568
1569 /* The last slot for this variable, inclusive. */
1570 unsigned slot_end = *location + num_components - 1;
1571
1572 /* FIXME: We could be smarter in the below code and loop back over
1573 * trying to fill any locations that we skipped because we couldn't pack
1574 * the varying between explicit locations. For now just let the user
1575 * hit the linking error if we run out of room and suggest they use
1576 * explicit locations.
1577 */
1578 while (slot_end < MAX_VARYING * 4u) {
1579 const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
1580 const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
1581
1582 assert(slots > 0);
1583 if (reserved_slots & slot_mask) {
1584 *location = ALIGN(*location + 1, 4);
1585 slot_end = *location + num_components - 1;
1586 continue;
1587 }
1588
1589 break;
1590 }
1591
1592 if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
1593 linker_error(prog, "insufficient contiguous locations available for "
1594 "%s it is possible an array or struct could not be "
1595 "packed between varyings with explicit locations. Try "
1596 "using an explicit location for arrays and structs.",
1597 var->name);
1598 }
1599
1600 if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
1601 for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
1602 components[j] = 4;
1603 components[slot_end / 4u] = (slot_end & 3) + 1;
1604 }
1605
1606 this->matches[i].generic_location = *location;
1607
1608 *location = slot_end + 1;
1609 }
1610
1611 return (generic_location + 3) / 4;
1612 }
1613
1614
1615 /**
1616 * Update the producer and consumer shaders to reflect the locations
1617 * assignments that were made by varying_matches::assign_locations().
1618 */
1619 void
1620 varying_matches::store_locations() const
1621 {
1622 /* Check if the location needs to be packed with lower_packed_varyings() or if
1623 * we can just use ARB_enhanced_layouts packing.
1624 */
1625 bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
1626 const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
1627
1628 for (unsigned i = 0; i < this->num_matches; i++) {
1629 ir_variable *producer_var = this->matches[i].producer_var;
1630 ir_variable *consumer_var = this->matches[i].consumer_var;
1631 unsigned generic_location = this->matches[i].generic_location;
1632 unsigned slot = generic_location / 4;
1633 unsigned offset = generic_location % 4;
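/* For example (illustrative values): generic_location 13 decomposes into
 * slot 3 and offset 1, so the varying lands at VARYING_SLOT_VAR3 with
 * location_frac 1, i.e. the y component of that slot.
 */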
1634
1635 if (producer_var) {
1636 producer_var->data.location = VARYING_SLOT_VAR0 + slot;
1637 producer_var->data.location_frac = offset;
1638 }
1639
1640 if (consumer_var) {
1641 assert(consumer_var->data.location == -1);
1642 consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
1643 consumer_var->data.location_frac = offset;
1644 }
1645
1646 /* Find locations suitable for native packing via
1647 * ARB_enhanced_layouts.
1648 */
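/* Illustrative cases: a mat2x3 occupies 6 component slots, so with offset 0
 * both generic slots it covers are flagged in pack_loc[] and left to
 * lower_packed_varyings(). A vec2 starting at offset 3 would spill past
 * component 3 (3 + 2 > 4), so the current and following slots are flagged
 * too. Everything else records its type in loc_type[] for the second pass.
 */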
1649 if (producer_var && consumer_var) {
1650 if (enhanced_layouts_enabled) {
1651 const glsl_type *type =
1652 get_varying_type(producer_var, producer_stage);
1653 if (type->is_array() || type->is_matrix() || type->is_record() ||
1654 type->is_double()) {
1655 unsigned comp_slots = type->component_slots() + offset;
1656 unsigned slots = comp_slots / 4;
1657 if (comp_slots % 4)
1658 slots += 1;
1659
1660 for (unsigned j = 0; j < slots; j++) {
1661 pack_loc[slot + j] = true;
1662 }
1663 } else if (offset + type->vector_elements > 4) {
1664 pack_loc[slot] = true;
1665 pack_loc[slot + 1] = true;
1666 } else {
1667 loc_type[slot][offset] = type;
1668 }
1669 }
1670 }
1671 }
1672
1673 /* Attempt to use ARB_enhanced_layouts for more efficient packing if
1674 * suitable.
1675 */
1676 if (enhanced_layouts_enabled) {
1677 for (unsigned i = 0; i < this->num_matches; i++) {
1678 ir_variable *producer_var = this->matches[i].producer_var;
1679 ir_variable *consumer_var = this->matches[i].consumer_var;
1680 unsigned generic_location = this->matches[i].generic_location;
1681 unsigned slot = generic_location / 4;
1682
1683 if (pack_loc[slot] || !producer_var || !consumer_var)
1684 continue;
1685
1686 const glsl_type *type =
1687 get_varying_type(producer_var, producer_stage);
1688 bool type_match = true;
1689 for (unsigned j = 0; j < 4; j++) {
1690 if (loc_type[slot][j]) {
1691 if (type->base_type != loc_type[slot][j]->base_type)
1692 type_match = false;
1693 }
1694 }
1695
1696 if (type_match) {
1697 producer_var->data.explicit_location = 1;
1698 consumer_var->data.explicit_location = 1;
1699 producer_var->data.explicit_component = 1;
1700 consumer_var->data.explicit_component = 1;
1701 }
1702 }
1703 }
1704 }
1705
1706
1707 /**
1708 * Compute the "packing class" of the given varying. This is an unsigned
1709 * integer with the property that two variables in the same packing class can
1710 * be safely packed into the same vec4.
1711 */
1712 unsigned
1713 varying_matches::compute_packing_class(const ir_variable *var)
1714 {
1715 /* Without help from the back-end, there is no way to pack together
1716 * variables with different interpolation types, because
1717 * lower_packed_varyings must choose exactly one interpolation type for
1718 * each packed varying it creates.
1719 *
1720 * However, we can safely pack together floats, ints, and uints, because:
1721 *
1722 * - varyings of base type "int" and "uint" must use the "flat"
1723 * interpolation type, which can only occur in GLSL 1.30 and above.
1724 *
1725 * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
1726 * can store flat floats as ints without losing any information (using
1727 * the ir_unop_bitcast_* opcodes).
1728 *
1729 * Therefore, the packing class depends only on the interpolation type.
1730 */
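/* Illustrative example: a centroid-qualified varying sets only the centroid
 * bit, giving (1 | 0 << 1 | 0 << 2 | 0 << 3) * 8 = 8 plus the numeric value
 * of its interpolation mode, while an otherwise identical non-centroid
 * varying lands in class 0 plus the same interpolation value, so the two
 * are never packed into the same slot.
 */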
1731 unsigned packing_class = var->data.centroid | (var->data.sample << 1) |
1732 (var->data.patch << 2) |
1733 (var->data.must_be_shader_input << 3);
1734 packing_class *= 8;
1735 packing_class += var->is_interpolation_flat()
1736 ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
1737 return packing_class;
1738 }
1739
1740
1741 /**
1742 * Compute the "packing order" of the given varying. This is a sort key we
1743 * use to determine when to attempt to pack the given varying relative to
1744 * other varyings in the same packing class.
1745 */
1746 varying_matches::packing_order_enum
1747 varying_matches::compute_packing_order(const ir_variable *var)
1748 {
1749 const glsl_type *element_type = var->type;
1750
1751 while (element_type->is_array()) {
1752 element_type = element_type->fields.array;
1753 }
1754
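/* Illustrative examples: a float (or float[4], whose element type is float)
 * has one component slot, 1 % 4 == 1, so it sorts as PACKING_ORDER_SCALAR;
 * a vec3 gives PACKING_ORDER_VEC3; a mat2 has four component slots,
 * 4 % 4 == 0, so it sorts as PACKING_ORDER_VEC4.
 */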
1755 switch (element_type->component_slots() % 4) {
1756 case 1: return PACKING_ORDER_SCALAR;
1757 case 2: return PACKING_ORDER_VEC2;
1758 case 3: return PACKING_ORDER_VEC3;
1759 case 0: return PACKING_ORDER_VEC4;
1760 default:
1761 assert(!"Unexpected value of component_slots() % 4");
1762 return PACKING_ORDER_VEC4;
1763 }
1764 }
1765
1766
1767 /**
1768 * Comparison function passed to qsort() to sort varyings by packing_class and
1769 * then by packing_order.
1770 */
1771 int
1772 varying_matches::match_comparator(const void *x_generic, const void *y_generic)
1773 {
1774 const match *x = (const match *) x_generic;
1775 const match *y = (const match *) y_generic;
1776
1777 if (x->packing_class != y->packing_class)
1778 return x->packing_class - y->packing_class;
1779 return x->packing_order - y->packing_order;
1780 }
1781
1782
1783 /**
1784 * Comparison function passed to qsort() to sort varyings used only by
1785 * transform feedback when packing of other varyings is disabled.
1786 */
1787 int
1788 varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
1789 {
1790 const match *x = (const match *) x_generic;
1791
1792 if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
1793 return match_comparator(x_generic, y_generic);
1794
1795 /* FIXME: When the comparator returns 0 it means the elements being
1796 * compared are equivalent. However the qsort documentation says:
1797 *
1798 * "The order of equivalent elements is undefined."
1799 *
1800 * In practice the sort ends up reversing the order of the varyings, which
1801 * means locations are also assigned in this reversed order, and that
1802 * happens to be what we want. This is also what's happening in
1803 * varying_matches::match_comparator().
1804 */
1805 return 0;
1806 }
1807
1808
1809 /**
1810 * Is the given variable a varying variable to be counted against the
1811 * limit in ctx->Const.MaxVarying?
1812 * This includes variables such as texcoords, colors and generic
1813 * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
1814 */
1815 static bool
1816 var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
1817 {
1818 /* Only fragment shaders will take a varying variable as an input */
1819 if (stage == MESA_SHADER_FRAGMENT &&
1820 var->data.mode == ir_var_shader_in) {
1821 switch (var->data.location) {
1822 case VARYING_SLOT_POS:
1823 case VARYING_SLOT_FACE:
1824 case VARYING_SLOT_PNTC:
1825 return false;
1826 default:
1827 return true;
1828 }
1829 }
1830 return false;
1831 }
1832
1833
1834 /**
1835 * Visitor class that generates tfeedback_candidate structs describing all
1836 * possible targets of transform feedback.
1837 *
1838 * tfeedback_candidate structs are stored in the hash table
1839 * tfeedback_candidates, which is passed to the constructor. This hash table
1840 * maps varying names to instances of the tfeedback_candidate struct.
1841 */
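/* A sketch of the candidates produced (hypothetical shader): for a producer
 * varying declared roughly as
 *
 *    struct S { float a; vec2 b; };
 *    out S s;
 *
 * the visitor records "s.a" with offset 0 and "s.b" with offset 1, because
 * visit_field() advances varying_floats by component_slots() for every leaf
 * field it visits.
 */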
1842 class tfeedback_candidate_generator : public program_resource_visitor
1843 {
1844 public:
1845 tfeedback_candidate_generator(void *mem_ctx,
1846 hash_table *tfeedback_candidates)
1847 : mem_ctx(mem_ctx),
1848 tfeedback_candidates(tfeedback_candidates),
1849 toplevel_var(NULL),
1850 varying_floats(0)
1851 {
1852 }
1853
1854 void process(ir_variable *var)
1855 {
1856 /* All named varying interface blocks should be flattened by now */
1857 assert(!var->is_interface_instance());
1858
1859 this->toplevel_var = var;
1860 this->varying_floats = 0;
1861 program_resource_visitor::process(var, false);
1862 }
1863
1864 private:
1865 virtual void visit_field(const glsl_type *type, const char *name,
1866 bool /* row_major */,
1867 const glsl_type * /* record_type */,
1868 const enum glsl_interface_packing,
1869 bool /* last_field */)
1870 {
1871 assert(!type->without_array()->is_record());
1872 assert(!type->without_array()->is_interface());
1873
1874 tfeedback_candidate *candidate
1875 = rzalloc(this->mem_ctx, tfeedback_candidate);
1876 candidate->toplevel_var = this->toplevel_var;
1877 candidate->type = type;
1878 candidate->offset = this->varying_floats;
1879 _mesa_hash_table_insert(this->tfeedback_candidates,
1880 ralloc_strdup(this->mem_ctx, name),
1881 candidate);
1882 this->varying_floats += type->component_slots();
1883 }
1884
1885 /**
1886 * Memory context used to allocate hash table keys and values.
1887 */
1888 void * const mem_ctx;
1889
1890 /**
1891 * Hash table in which tfeedback_candidate objects should be stored.
1892 */
1893 hash_table * const tfeedback_candidates;
1894
1895 /**
1896 * Pointer to the toplevel variable that is being traversed.
1897 */
1898 ir_variable *toplevel_var;
1899
1900 /**
1901 * Total number of varying floats that have been visited so far. This is
1902 * used to determine the offset to each varying within the toplevel
1903 * variable.
1904 */
1905 unsigned varying_floats;
1906 };
1907
1908
1909 namespace linker {
1910
1911 void
1912 populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
1913 hash_table *consumer_inputs,
1914 hash_table *consumer_interface_inputs,
1915 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1916 {
1917 memset(consumer_inputs_with_locations,
1918 0,
1919 sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
1920
1921 foreach_in_list(ir_instruction, node, ir) {
1922 ir_variable *const input_var = node->as_variable();
1923
1924 if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
1925 /* All interface blocks should have been lowered by this point */
1926 assert(!input_var->type->is_interface());
1927
1928 if (input_var->data.explicit_location) {
1929 /* assign_varying_locations only cares about finding the
1930 * ir_variable at the start of a contiguous location block.
1931 *
1932 * - For !producer, consumer_inputs_with_locations isn't used.
1933 *
1934 * - For !consumer, consumer_inputs_with_locations is empty.
1935 *
1936 * For consumer && producer, if you were trying to set some
1937 * ir_variable to the middle of a location block on the other side
1938 * of producer/consumer, cross_validate_outputs_to_inputs() should
1939 * be link-erroring due to either type mismatch or location
1940 * overlaps. If the variables do match up, then they've got a
1941 * matching data.location and you only looked at
1942 * consumer_inputs_with_locations[var->data.location], not any
1943 * following entries for the array/structure.
1944 */
1945 consumer_inputs_with_locations[input_var->data.location] =
1946 input_var;
1947 } else if (input_var->get_interface_type() != NULL) {
1948 char *const iface_field_name =
1949 ralloc_asprintf(mem_ctx, "%s.%s",
1950 input_var->get_interface_type()->without_array()->name,
1951 input_var->name);
1952 _mesa_hash_table_insert(consumer_interface_inputs,
1953 iface_field_name, input_var);
1954 } else {
1955 _mesa_hash_table_insert(consumer_inputs,
1956 ralloc_strdup(mem_ctx, input_var->name),
1957 input_var);
1958 }
1959 }
1960 }
1961 }
1962
1963 /**
1964 * Find a variable from the consumer that "matches" the specified variable
1965 *
1966 * This function only finds inputs with names that match. There is no
1967 * validation (here) that the types, etc. are compatible.
1968 */
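/* Rough illustration of the lookup paths below: an output with an explicit
 * location is matched purely through
 * consumer_inputs_with_locations[location]; an output that is a member "v"
 * of an interface block "Blk" (hypothetical names) is looked up under the
 * key "Blk.v" in consumer_interface_inputs; any other output is looked up by
 * its plain name in consumer_inputs.
 */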
1969 ir_variable *
1970 get_matching_input(void *mem_ctx,
1971 const ir_variable *output_var,
1972 hash_table *consumer_inputs,
1973 hash_table *consumer_interface_inputs,
1974 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
1975 {
1976 ir_variable *input_var;
1977
1978 if (output_var->data.explicit_location) {
1979 input_var = consumer_inputs_with_locations[output_var->data.location];
1980 } else if (output_var->get_interface_type() != NULL) {
1981 char *const iface_field_name =
1982 ralloc_asprintf(mem_ctx, "%s.%s",
1983 output_var->get_interface_type()->without_array()->name,
1984 output_var->name);
1985 hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
1986 input_var = entry ? (ir_variable *) entry->data : NULL;
1987 } else {
1988 hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
1989 input_var = entry ? (ir_variable *) entry->data : NULL;
1990 }
1991
1992 return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
1993 ? NULL : input_var;
1994 }
1995
1996 }
1997
1998 static int
1999 io_variable_cmp(const void *_a, const void *_b)
2000 {
2001 const ir_variable *const a = *(const ir_variable **) _a;
2002 const ir_variable *const b = *(const ir_variable **) _b;
2003
2004 if (a->data.explicit_location && b->data.explicit_location)
2005 return b->data.location - a->data.location;
2006
2007 if (a->data.explicit_location && !b->data.explicit_location)
2008 return 1;
2009
2010 if (!a->data.explicit_location && b->data.explicit_location)
2011 return -1;
2012
2013 return -strcmp(a->name, b->name);
2014 }
2015
2016 /**
2017 * Sort the shader IO variables into canonical order
2018 */
2019 static void
2020 canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
2021 {
2022 ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
2023 unsigned num_variables = 0;
2024
2025 foreach_in_list(ir_instruction, node, ir) {
2026 ir_variable *const var = node->as_variable();
2027
2028 if (var == NULL || var->data.mode != io_mode)
2029 continue;
2030
2031 /* If we have already encountered more I/O variables than could
2032 * successfully link, bail.
2033 */
2034 if (num_variables == ARRAY_SIZE(var_table))
2035 return;
2036
2037 var_table[num_variables++] = var;
2038 }
2039
2040 if (num_variables == 0)
2041 return;
2042
2043 /* Sort the list in reverse order (io_variable_cmp handles this). Later
2044 * we're going to push the variables onto the IR list as a stack, so we
2045 * want the last variable (in canonical order) to be first in the list.
2046 */
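/* Rough illustration (hypothetical declarations): given outputs "b", "a" and
 * "layout(location = 7) out vec4 c", the sorted array is { b, a, c } and the
 * push_head() loop below leaves the IR list starting with c, a, b --
 * explicit locations first in increasing location order, then the remaining
 * variables in name order.
 */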
2047 qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
2048
2049 /* Remove the variable from its current location in the IR, and put it at
2050 * the front.
2051 */
2052 for (unsigned i = 0; i < num_variables; i++) {
2053 var_table[i]->remove();
2054 ir->push_head(var_table[i]);
2055 }
2056 }
2057
2058 /**
2059 * Generate a bitfield map of the explicit locations for shader varyings.
2060 *
2061 * Note: For tessellation shaders we are sitting right at the limit of the
2062 * 64-bit map. Per-vertex and per-patch varyings have separate location
2063 * domains, each with a max of MAX_VARYING.
2064 */
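/* Illustrative example (hypothetical shader): with
 *
 *    layout(location = 2) out vec4 a[3];
 *
 * in the producer, the array starts at VARYING_SLOT_VAR2 and covers three
 * slots, so the returned mask has bits 2, 3 and 4 set (0x1c).
 */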
2065 static uint64_t
2066 reserved_varying_slot(struct gl_linked_shader *stage,
2067 ir_variable_mode io_mode)
2068 {
2069 assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
2070 /* Avoid an overflow of the returned value */
2071 assert(MAX_VARYINGS_INCL_PATCH <= 64);
2072
2073 uint64_t slots = 0;
2074 int var_slot;
2075
2076 if (!stage)
2077 return slots;
2078
2079 foreach_in_list(ir_instruction, node, stage->ir) {
2080 ir_variable *const var = node->as_variable();
2081
2082 if (var == NULL || var->data.mode != io_mode ||
2083 !var->data.explicit_location ||
2084 var->data.location < VARYING_SLOT_VAR0)
2085 continue;
2086
2087 var_slot = var->data.location - VARYING_SLOT_VAR0;
2088
2089 unsigned num_elements = get_varying_type(var, stage->Stage)
2090 ->count_attribute_slots(io_mode == ir_var_shader_in &&
2091 stage->Stage == MESA_SHADER_VERTEX);
2092 for (unsigned i = 0; i < num_elements; i++) {
2093 if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
2094 slots |= UINT64_C(1) << var_slot;
2095 var_slot += 1;
2096 }
2097 }
2098
2099 return slots;
2100 }
2101
2102
2103 /**
2104 * Assign locations for all variables that are produced in one pipeline stage
2105 * (the "producer") and consumed in the next stage (the "consumer").
2106 *
2107 * Variables produced by the producer may also be consumed by transform
2108 * feedback.
2109 *
2110 * \param num_tfeedback_decls is the number of declarations indicating
2111 * variables that may be consumed by transform feedback.
2112 *
2113 * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
2114 * representing the result of parsing the strings passed to
2115 * glTransformFeedbackVaryings(). assign_location() will be called for
2116 * each of these objects that matches one of the outputs of the
2117 * producer.
2118 *
2119 * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
2120 * be NULL. In this case, varying locations are assigned solely based on the
2121 * requirements of transform feedback.
2122 */
2123 static bool
2124 assign_varying_locations(struct gl_context *ctx,
2125 void *mem_ctx,
2126 struct gl_shader_program *prog,
2127 gl_linked_shader *producer,
2128 gl_linked_shader *consumer,
2129 unsigned num_tfeedback_decls,
2130 tfeedback_decl *tfeedback_decls,
2131 const uint64_t reserved_slots)
2132 {
2133 /* Tessellation shaders treat inputs and outputs as shared memory and can
2134 * access inputs and outputs of other invocations.
2135 * Therefore, they can't be lowered to temps easily (and definitely not
2136 * efficiently).
2137 */
2138 bool unpackable_tess =
2139 (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
2140 (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
2141 (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
2142
2143 /* Transform feedback code assumes varying arrays are packed, so if the
2144 * driver has disabled varying packing, make sure to at least enable
2145 * packing required by transform feedback.
2146 */
2147 bool xfb_enabled =
2148 ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
2149
2150 /* Disable packing on outward facing interfaces for SSO because in ES we
2151 * need to retain the unpacked varying information for draw time
2152 * validation.
2153 *
2154 * Packing is still enabled on individual arrays, structs, and matrices as
2155 * these are required by the transform feedback code and it is still safe
2156 * to do so. We also enable packing when a varying is only used for
2157 * transform feedback and it's not an SSO.
2158 */
2159 bool disable_varying_packing =
2160 ctx->Const.DisableVaryingPacking || unpackable_tess;
2161 if (prog->SeparateShader && (producer == NULL || consumer == NULL))
2162 disable_varying_packing = true;
2163
2164 varying_matches matches(disable_varying_packing, xfb_enabled,
2165 ctx->Extensions.ARB_enhanced_layouts,
2166 producer ? producer->Stage : MESA_SHADER_NONE,
2167 consumer ? consumer->Stage : MESA_SHADER_NONE);
2168 hash_table *tfeedback_candidates =
2169 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2170 _mesa_key_string_equal);
2171 hash_table *consumer_inputs =
2172 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2173 _mesa_key_string_equal);
2174 hash_table *consumer_interface_inputs =
2175 _mesa_hash_table_create(NULL, _mesa_key_hash_string,
2176 _mesa_key_string_equal);
2177 ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
2178 NULL,
2179 };
2180
2181 unsigned consumer_vertices = 0;
2182 if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
2183 consumer_vertices = prog->Geom.VerticesIn;
2184
2185 /* Operate in a total of four passes.
2186 *
2187 * 1. Sort inputs / outputs into a canonical order. This is necessary so
2188 * that inputs / outputs of separable shaders will be assigned
2189 * predictable locations regardless of the order in which declarations
2190 * appeared in the shader source.
2191 *
2192 * 2. Assign locations for any matching inputs and outputs.
2193 *
2194 * 3. Mark output variables in the producer that do not have locations as
2195 * not being outputs. This lets the optimizer eliminate them.
2196 *
2197 * 4. Mark input variables in the consumer that do not have locations as
2198 * not being inputs. This lets the optimizer eliminate them.
2199 */
2200 if (consumer)
2201 canonicalize_shader_io(consumer->ir, ir_var_shader_in);
2202
2203 if (producer)
2204 canonicalize_shader_io(producer->ir, ir_var_shader_out);
2205
2206 if (consumer)
2207 linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
2208 consumer_inputs,
2209 consumer_interface_inputs,
2210 consumer_inputs_with_locations);
2211
2212 if (producer) {
2213 foreach_in_list(ir_instruction, node, producer->ir) {
2214 ir_variable *const output_var = node->as_variable();
2215
2216 if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
2217 continue;
2218
2219 /* Only geometry shaders can use non-zero streams */
2220 assert(output_var->data.stream == 0 ||
2221 (output_var->data.stream < MAX_VERTEX_STREAMS &&
2222 producer->Stage == MESA_SHADER_GEOMETRY));
2223
2224 if (num_tfeedback_decls > 0) {
2225 tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates);
2226 g.process(output_var);
2227 }
2228
2229 ir_variable *const input_var =
2230 linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
2231 consumer_interface_inputs,
2232 consumer_inputs_with_locations);
2233
2234 /* If a matching input variable was found, add this output (and the
2235 * input) to the set. If this is a separable program and there is no
2236 * consumer stage, add the output.
2237 *
2238 * Always add TCS outputs. They are shared by all invocations
2239 * within a patch and can be used as shared memory.
2240 */
2241 if (input_var || (prog->SeparateShader && consumer == NULL) ||
2242 producer->Stage == MESA_SHADER_TESS_CTRL) {
2243 matches.record(output_var, input_var);
2244 }
2245
2246 /* Only stream 0 outputs can be consumed in the next stage */
2247 if (input_var && output_var->data.stream != 0) {
2248 linker_error(prog, "output %s is assigned to stream=%d but "
2249 "is linked to an input, which requires stream=0",
2250 output_var->name, output_var->data.stream);
2251 return false;
2252 }
2253 }
2254 } else {
2255 /* If there's no producer stage, then this must be a separable program.
2256 * For example, we may have a program that has just a fragment shader.
2257 * Later this program will be used with some arbitrary vertex (or
2258 * geometry) shader program. This means that locations must be assigned
2259 * for all the inputs.
2260 */
2261 foreach_in_list(ir_instruction, node, consumer->ir) {
2262 ir_variable *const input_var = node->as_variable();
2263
2264 if (input_var == NULL || input_var->data.mode != ir_var_shader_in)
2265 continue;
2266
2267 matches.record(NULL, input_var);
2268 }
2269 }
2270
2271 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2272 if (!tfeedback_decls[i].is_varying())
2273 continue;
2274
2275 const tfeedback_candidate *matched_candidate
2276 = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
2277
2278 if (matched_candidate == NULL) {
2279 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2280 return false;
2281 }
2282
2283 /* Mark xfb varyings as always active */
2284 matched_candidate->toplevel_var->data.always_active_io = 1;
2285
2286 /* Mark any corresponding inputs as always active also. We must do this
2287 * because we have a NIR pass that lowers vectors to scalars and another
2288 * that removes unused varyings.
2289 * We don't split varyings marked as always active because there is no
2290 * point in doing so. This means we need to mark both sides of the
2291 * interface as always active; otherwise we will have a mismatch and
2292 * start removing things we shouldn't.
2293 */
2294 ir_variable *const input_var =
2295 linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
2296 consumer_inputs,
2297 consumer_interface_inputs,
2298 consumer_inputs_with_locations);
2299 if (input_var)
2300 input_var->data.always_active_io = 1;
2301
2302 if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
2303 matched_candidate->toplevel_var->data.is_xfb_only = 1;
2304 matches.record(matched_candidate->toplevel_var, NULL);
2305 }
2306 }
2307
2308 _mesa_hash_table_destroy(consumer_inputs, NULL);
2309 _mesa_hash_table_destroy(consumer_interface_inputs, NULL);
2310
2311 uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
2312 const unsigned slots_used = matches.assign_locations(
2313 prog, components, reserved_slots);
2314 matches.store_locations();
2315
2316 for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
2317 if (!tfeedback_decls[i].is_varying())
2318 continue;
2319
2320 if (!tfeedback_decls[i].assign_location(ctx, prog)) {
2321 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2322 return false;
2323 }
2324 }
2325 _mesa_hash_table_destroy(tfeedback_candidates, NULL);
2326
2327 if (consumer && producer) {
2328 foreach_in_list(ir_instruction, node, consumer->ir) {
2329 ir_variable *const var = node->as_variable();
2330
2331 if (var && var->data.mode == ir_var_shader_in &&
2332 var->data.is_unmatched_generic_inout) {
2333 if (!prog->IsES && prog->data->Version <= 120) {
2334 /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
2335 *
2336 * Only those varying variables used (i.e. read) in
2337 * the fragment shader executable must be written to
2338 * by the vertex shader executable; declaring
2339 * superfluous varying variables in a vertex shader is
2340 * permissible.
2341 *
2342 * We interpret this text as meaning that the VS must
2343 * write the variable for the FS to read it. See
2344 * "glsl1-varying read but not written" in piglit.
2345 */
2346 linker_error(prog, "%s shader varying %s not written "
2347 "by %s shader\n.",
2348 _mesa_shader_stage_to_string(consumer->Stage),
2349 var->name,
2350 _mesa_shader_stage_to_string(producer->Stage));
2351 } else {
2352 linker_warning(prog, "%s shader varying %s not written "
2353 "by %s shader\n.",
2354 _mesa_shader_stage_to_string(consumer->Stage),
2355 var->name,
2356 _mesa_shader_stage_to_string(producer->Stage));
2357 }
2358 }
2359 }
2360
2361 /* Now that validation is done it's safe to remove unused varyings. As
2362 * we have both a producer and consumer it's safe to remove unused
2363 * varyings even if the program is an SSO because the stages are being
2364 * linked together, i.e. we have a multi-stage SSO.
2365 */
2366 remove_unused_shader_inputs_and_outputs(false, producer,
2367 ir_var_shader_out);
2368 remove_unused_shader_inputs_and_outputs(false, consumer,
2369 ir_var_shader_in);
2370 }
2371
2372 if (producer) {
2373 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
2374 0, producer, disable_varying_packing,
2375 xfb_enabled);
2376 }
2377
2378 if (consumer) {
2379 lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
2380 consumer_vertices, consumer,
2381 disable_varying_packing, xfb_enabled);
2382 }
2383
2384 return true;
2385 }
2386
2387 static bool
2388 check_against_output_limit(struct gl_context *ctx,
2389 struct gl_shader_program *prog,
2390 gl_linked_shader *producer,
2391 unsigned num_explicit_locations)
2392 {
2393 unsigned output_vectors = num_explicit_locations;
2394
2395 foreach_in_list(ir_instruction, node, producer->ir) {
2396 ir_variable *const var = node->as_variable();
2397
2398 if (var && !var->data.explicit_location &&
2399 var->data.mode == ir_var_shader_out &&
2400 var_counts_against_varying_limit(producer->Stage, var)) {
2401 /* outputs for fragment shader can't be doubles */
2402 output_vectors += var->type->count_attribute_slots(false);
2403 }
2404 }
2405
2406 assert(producer->Stage != MESA_SHADER_FRAGMENT);
2407 unsigned max_output_components =
2408 ctx->Const.Program[producer->Stage].MaxOutputComponents;
2409
2410 const unsigned output_components = output_vectors * 4;
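/* Illustrative check (hypothetical limits): with MaxOutputComponents = 64
 * the limit is 16 vec4 outputs, so a producer counting 17 output vectors
 * (68 components) fails with the error below.
 */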
2411 if (output_components > max_output_components) {
2412 if (ctx->API == API_OPENGLES2 || prog->IsES)
2413 linker_error(prog, "%s shader uses too many output vectors "
2414 "(%u > %u)\n",
2415 _mesa_shader_stage_to_string(producer->Stage),
2416 output_vectors,
2417 max_output_components / 4);
2418 else
2419 linker_error(prog, "%s shader uses too many output components "
2420 "(%u > %u)\n",
2421 _mesa_shader_stage_to_string(producer->Stage),
2422 output_components,
2423 max_output_components);
2424
2425 return false;
2426 }
2427
2428 return true;
2429 }
2430
2431 static bool
2432 check_against_input_limit(struct gl_context *ctx,
2433 struct gl_shader_program *prog,
2434 gl_linked_shader *consumer,
2435 unsigned num_explicit_locations)
2436 {
2437 unsigned input_vectors = num_explicit_locations;
2438
2439 foreach_in_list(ir_instruction, node, consumer->ir) {
2440 ir_variable *const var = node->as_variable();
2441
2442 if (var && !var->data.explicit_location &&
2443 var->data.mode == ir_var_shader_in &&
2444 var_counts_against_varying_limit(consumer->Stage, var)) {
2445 /* vertex inputs aren't varying counted */
2446 input_vectors += var->type->count_attribute_slots(false);
2447 }
2448 }
2449
2450 assert(consumer->Stage != MESA_SHADER_VERTEX);
2451 unsigned max_input_components =
2452 ctx->Const.Program[consumer->Stage].MaxInputComponents;
2453
2454 const unsigned input_components = input_vectors * 4;
2455 if (input_components > max_input_components) {
2456 if (ctx->API == API_OPENGLES2 || prog->IsES)
2457 linker_error(prog, "%s shader uses too many input vectors "
2458 "(%u > %u)\n",
2459 _mesa_shader_stage_to_string(consumer->Stage),
2460 input_vectors,
2461 max_input_components / 4);
2462 else
2463 linker_error(prog, "%s shader uses too many input components "
2464 "(%u > %u)\n",
2465 _mesa_shader_stage_to_string(consumer->Stage),
2466 input_components,
2467 max_input_components);
2468
2469 return false;
2470 }
2471
2472 return true;
2473 }
2474
2475 bool
2476 link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
2477 struct gl_context *ctx, void *mem_ctx)
2478 {
2479 bool has_xfb_qualifiers = false;
2480 unsigned num_tfeedback_decls = 0;
2481 char **varying_names = NULL;
2482 tfeedback_decl *tfeedback_decls = NULL;
2483
2484 /* From the ARB_enhanced_layouts spec:
2485 *
2486 * "If the shader used to record output variables for transform feedback
2487 * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
2488 * qualifiers, the values specified by TransformFeedbackVaryings are
2489 * ignored, and the set of variables captured for transform feedback is
2490 * instead derived from the specified layout qualifiers."
2491 */
2492 for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
2493 /* Find last stage before fragment shader */
2494 if (prog->_LinkedShaders[i]) {
2495 has_xfb_qualifiers =
2496 process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
2497 prog, &num_tfeedback_decls,
2498 &varying_names);
2499 break;
2500 }
2501 }
2502
2503 if (!has_xfb_qualifiers) {
2504 num_tfeedback_decls = prog->TransformFeedback.NumVarying;
2505 varying_names = prog->TransformFeedback.VaryingNames;
2506 }
2507
2508 if (num_tfeedback_decls != 0) {
2509 /* From GL_EXT_transform_feedback:
2510 * A program will fail to link if:
2511 *
2512 * * the <count> specified by TransformFeedbackVaryingsEXT is
2513 * non-zero, but the program object has no vertex or geometry
2514 * shader;
2515 */
2516 if (first >= MESA_SHADER_FRAGMENT) {
2517 linker_error(prog, "Transform feedback varyings specified, but "
2518 "no vertex, tessellation, or geometry shader is "
2519 "present.\n");
2520 return false;
2521 }
2522
2523 tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
2524 num_tfeedback_decls);
2525 if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
2526 varying_names, tfeedback_decls))
2527 return false;
2528 }
2529
2530 /* If there is no fragment shader we still need to set up transform feedback.
2531 *
2532 * For SSO we also need to assign output locations. We assign them here
2533 * because we need to do it for both single stage programs and multi stage
2534 * programs.
2535 */
2536 if (last < MESA_SHADER_FRAGMENT &&
2537 (num_tfeedback_decls != 0 || prog->SeparateShader)) {
2538 const uint64_t reserved_out_slots =
2539 reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
2540 if (!assign_varying_locations(ctx, mem_ctx, prog,
2541 prog->_LinkedShaders[last], NULL,
2542 num_tfeedback_decls, tfeedback_decls,
2543 reserved_out_slots))
2544 return false;
2545 }
2546
2547 if (last <= MESA_SHADER_FRAGMENT) {
2548 /* Remove unused varyings from the first/last stage unless SSO */
2549 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2550 prog->_LinkedShaders[first],
2551 ir_var_shader_in);
2552 remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
2553 prog->_LinkedShaders[last],
2554 ir_var_shader_out);
2555
2556 /* If the program is made up of only a single stage */
2557 if (first == last) {
2558 gl_linked_shader *const sh = prog->_LinkedShaders[last];
2559
2560 do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
2561 do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
2562 tfeedback_decls);
2563
2564 if (prog->SeparateShader) {
2565 const uint64_t reserved_slots =
2566 reserved_varying_slot(sh, ir_var_shader_in);
2567
2568 /* Assign input locations for SSO, output locations are already
2569 * assigned.
2570 */
2571 if (!assign_varying_locations(ctx, mem_ctx, prog,
2572 NULL /* producer */,
2573 sh /* consumer */,
2574 0 /* num_tfeedback_decls */,
2575 NULL /* tfeedback_decls */,
2576 reserved_slots))
2577 return false;
2578 }
2579 } else {
2580 /* Linking the stages in the opposite order (from fragment to vertex)
2581 * ensures that inter-shader outputs written to in an earlier stage
2582 * are eliminated if they are (transitively) not used in a later
2583 * stage.
2584 */
2585 int next = last;
2586 for (int i = next - 1; i >= 0; i--) {
2587 if (prog->_LinkedShaders[i] == NULL && i != 0)
2588 continue;
2589
2590 gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
2591 gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
2592
2593 const uint64_t reserved_out_slots =
2594 reserved_varying_slot(sh_i, ir_var_shader_out);
2595 const uint64_t reserved_in_slots =
2596 reserved_varying_slot(sh_next, ir_var_shader_in);
2597
2598 do_dead_builtin_varyings(ctx, sh_i, sh_next,
2599 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2600 tfeedback_decls);
2601
2602 if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
2603 next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
2604 tfeedback_decls,
2605 reserved_out_slots | reserved_in_slots))
2606 return false;
2607
2608 /* This must be done after all dead varyings are eliminated. */
2609 if (sh_i != NULL) {
2610 unsigned slots_used = _mesa_bitcount_64(reserved_out_slots);
2611 if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
2612 return false;
2613 }
2614 }
2615
2616 unsigned slots_used = _mesa_bitcount_64(reserved_in_slots);
2617 if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
2618 return false;
2619
2620 next = i;
2621 }
2622 }
2623 }
2624
2625 if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
2626 has_xfb_qualifiers))
2627 return false;
2628
2629 return true;
2630 }