nir: fix packing of TCS varyings not read by the TES
[mesa.git] / src / compiler / nir / nir_linking_helpers.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "util/set.h"
27 #include "util/hash_table.h"
28
29 /* This file contains various little helpers for doing simple linking in
30 * NIR. Eventually, we'll probably want a full-blown varying packing
31 * implementation in here. Right now, it just deletes unused things.
32 */
33
34 /**
35 * Returns the bits in the inputs_read, outputs_written, or
36 * system_values_read bitfield corresponding to this variable.
37 */
38 static uint64_t
39 get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
40 {
41 if (var->data.location < 0)
42 return 0;
43
44 unsigned location = var->data.patch ?
45 var->data.location - VARYING_SLOT_PATCH0 : var->data.location;
46
47 assert(var->data.mode == nir_var_shader_in ||
48 var->data.mode == nir_var_shader_out ||
49 var->data.mode == nir_var_system_value);
50 assert(var->data.location >= 0);
51
52 const struct glsl_type *type = var->type;
53 if (nir_is_per_vertex_io(var, stage)) {
54 assert(glsl_type_is_array(type));
55 type = glsl_get_array_element(type);
56 }
57
58 unsigned slots = glsl_count_attribute_slots(type, false);
59 return ((1ull << slots) - 1) << location;
60 }
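
/* For example (illustrative): a non-patch float[3] output at VARYING_SLOT_VAR0
 * occupies three slots, so the function above returns
 *
 *    ((1ull << 3) - 1) << VARYING_SLOT_VAR0
 *
 * i.e. the bits for the three consecutive slots starting at VARYING_SLOT_VAR0.
 * For per-patch variables the location is rebased to VARYING_SLOT_PATCH0
 * first, so the bits index patch slots rather than the regular varying slots.
 */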
61
62 static uint8_t
63 get_num_components(nir_variable *var)
64 {
65 if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
66 return 4;
67
68 return glsl_get_vector_elements(glsl_without_array(var->type));
69 }
70
71 static void
72 tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
73 {
74 nir_foreach_function(function, shader) {
75 if (!function->impl)
76 continue;
77
78 nir_foreach_block(block, function->impl) {
79 nir_foreach_instr(instr, block) {
80 if (instr->type != nir_instr_type_intrinsic)
81 continue;
82
83 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
84 if (intrin->intrinsic != nir_intrinsic_load_deref)
85 continue;
86
87 nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
88 if (deref->mode != nir_var_shader_out)
89 continue;
90
91 nir_variable *var = nir_deref_instr_get_variable(deref);
92 for (unsigned i = 0; i < get_num_components(var); i++) {
93 if (var->data.patch) {
94 patches_read[var->data.location_frac + i] |=
95 get_variable_io_mask(var, shader->info.stage);
96 } else {
97 read[var->data.location_frac + i] |=
98 get_variable_io_mask(var, shader->info.stage);
99 }
100 }
101 }
102 }
103 }
104 }
105
106 /**
107 * Helper for removing unused shader I/O variables by demoting them to global
108 * variables (which may then be dead-code eliminated).
109 *
110 * Example usage is:
111 *
112 * progress = nir_remove_unused_io_vars(producer,
113 * &producer->outputs,
114 * read, patches_read) ||
115 * progress;
116 *
117 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
118 * representing each .location_frac used. Note that for vector variables,
119 * only the first channel (.location_frac) is examined for deciding if the
120 * variable is used!
121 */
122 bool
123 nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
124 uint64_t *used_by_other_stage,
125 uint64_t *used_by_other_stage_patches)
126 {
127 bool progress = false;
128 uint64_t *used;
129
130 nir_foreach_variable_safe(var, var_list) {
131 if (var->data.patch)
132 used = used_by_other_stage_patches;
133 else
134 used = used_by_other_stage;
135
136 if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
137 continue;
138
139 if (var->data.always_active_io)
140 continue;
141
142 if (var->data.explicit_xfb_buffer)
143 continue;
144
145 uint64_t other_stage = used[var->data.location_frac];
146
147 if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
148 /* This one is unused, make it a global variable instead */
149 var->data.location = 0;
150 var->data.mode = nir_var_shader_temp;
151
152 exec_node_remove(&var->node);
153 exec_list_push_tail(&shader->globals, &var->node);
154
155 progress = true;
156 }
157 }
158
159 if (progress)
160 nir_fixup_deref_modes(shader);
161
162 return progress;
163 }
164
165 bool
166 nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
167 {
168 assert(producer->info.stage != MESA_SHADER_FRAGMENT);
169 assert(consumer->info.stage != MESA_SHADER_VERTEX);
170
171 uint64_t read[4] = { 0 }, written[4] = { 0 };
172 uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };
173
174 nir_foreach_variable(var, &producer->outputs) {
175 for (unsigned i = 0; i < get_num_components(var); i++) {
176 if (var->data.patch) {
177 patches_written[var->data.location_frac + i] |=
178 get_variable_io_mask(var, producer->info.stage);
179 } else {
180 written[var->data.location_frac + i] |=
181 get_variable_io_mask(var, producer->info.stage);
182 }
183 }
184 }
185
186 nir_foreach_variable(var, &consumer->inputs) {
187 for (unsigned i = 0; i < get_num_components(var); i++) {
188 if (var->data.patch) {
189 patches_read[var->data.location_frac + i] |=
190 get_variable_io_mask(var, consumer->info.stage);
191 } else {
192 read[var->data.location_frac + i] |=
193 get_variable_io_mask(var, consumer->info.stage);
194 }
195 }
196 }
197
198 /* Each TCS invocation can read data written by other TCS invocations,
199 * so even if the outputs are not used by the TES we must also make
200 * sure they are not read by the TCS before demoting them to globals.
201 */
202 if (producer->info.stage == MESA_SHADER_TESS_CTRL)
203 tcs_add_output_reads(producer, read, patches_read);
204
205 bool progress = false;
206 progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
207 patches_read);
208
209 progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
210 patches_written) || progress;
211
212 return progress;
213 }
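
/* Example usage (an illustrative sketch, not mandated by this file): a linker
 * would typically clean up the demoted outputs/inputs afterwards, e.g.:
 *
 *    if (nir_remove_unused_varyings(producer, consumer)) {
 *       nir_lower_global_vars_to_local(producer);
 *       nir_lower_global_vars_to_local(consumer);
 *    }
 *
 * followed by the driver's usual dead-code elimination; the exact follow-up
 * passes are driver specific.
 */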
214
215 static uint8_t
216 get_interp_type(nir_variable *var, const struct glsl_type *type,
217 bool default_to_smooth_interp)
218 {
219 if (glsl_type_is_integer(type))
220 return INTERP_MODE_FLAT;
221 else if (var->data.interpolation != INTERP_MODE_NONE)
222 return var->data.interpolation;
223 else if (default_to_smooth_interp)
224 return INTERP_MODE_SMOOTH;
225 else
226 return INTERP_MODE_NONE;
227 }
228
229 #define INTERPOLATE_LOC_SAMPLE 0
230 #define INTERPOLATE_LOC_CENTROID 1
231 #define INTERPOLATE_LOC_CENTER 2
232
233 static uint8_t
234 get_interp_loc(nir_variable *var)
235 {
236 if (var->data.sample)
237 return INTERPOLATE_LOC_SAMPLE;
238 else if (var->data.centroid)
239 return INTERPOLATE_LOC_CENTROID;
240 else
241 return INTERPOLATE_LOC_CENTER;
242 }
243
244 static bool
245 is_packing_supported_for_type(const struct glsl_type *type)
246 {
247 /* We ignore complex types such as arrays, matrices, structs and bit sizes
248 * other than 32-bit. All other vector types should have been split into
249 * scalar variables by the lower_io_to_scalar pass. The only exception
250 * should be OpenGL xfb varyings.
251 * TODO: add support for more complex types?
252 */
253 return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
254 }
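
/* For example, a lone "float", "int" or "uint" varying is packable here, while
 * "double" (not 32-bit), "vec2" (not scalar), arrays, matrices and structs all
 * keep their original locations.
 */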
255
256 struct assigned_comps
257 {
258 uint8_t comps;
259 uint8_t interp_type;
260 uint8_t interp_loc;
261 bool is_32bit;
262 };
263
264 /* Packing arrays and dual-slot varyings is difficult, so to avoid complex
265 * algorithms this function just assigns them their existing location for now.
266 * TODO: allow better packing of complex types.
267 */
268 static void
269 get_unmoveable_components_masks(struct exec_list *var_list,
270 struct assigned_comps *comps,
271 gl_shader_stage stage,
272 bool default_to_smooth_interp)
273 {
274 nir_foreach_variable_safe(var, var_list) {
275 assert(var->data.location >= 0);
276
277 /* Only remap things that aren't built-ins. */
278 if (var->data.location >= VARYING_SLOT_VAR0 &&
279 var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
280
281 const struct glsl_type *type = var->type;
282 if (nir_is_per_vertex_io(var, stage)) {
283 assert(glsl_type_is_array(type));
284 type = glsl_get_array_element(type);
285 }
286
287 /* If we can pack this varying then don't mark the components as
288 * used.
289 */
290 if (is_packing_supported_for_type(type))
291 continue;
292
293 unsigned location = var->data.location - VARYING_SLOT_VAR0;
294
295 unsigned elements =
296 glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
297 glsl_get_vector_elements(glsl_without_array(type)) : 4;
298
299 bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
300 unsigned slots = glsl_count_attribute_slots(type, false);
301 unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
302 unsigned comps_slot2 = 0;
303 for (unsigned i = 0; i < slots; i++) {
304 if (dual_slot) {
305 if (i & 1) {
306 comps[location + i].comps |= ((1 << comps_slot2) - 1);
307 } else {
308 unsigned num_comps = 4 - var->data.location_frac;
309 comps_slot2 = (elements * dmul) - num_comps;
310
311 /* Assume ARB_enhanced_layouts packing rules for doubles */
312 assert(var->data.location_frac == 0 ||
313 var->data.location_frac == 2);
314 assert(comps_slot2 <= 4);
315
316 comps[location + i].comps |=
317 ((1 << num_comps) - 1) << var->data.location_frac;
318 }
319 } else {
320 comps[location + i].comps |=
321 ((1 << (elements * dmul)) - 1) << var->data.location_frac;
322 }
323
324 comps[location + i].interp_type =
325 get_interp_type(var, type, default_to_smooth_interp);
326 comps[location + i].interp_loc = get_interp_loc(var);
327 comps[location + i].is_32bit =
328 glsl_type_is_32bit(glsl_without_array(type));
329 }
330 }
331 }
332 }
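
/* Worked example for the dual-slot path above: a dvec3 output with
 * location_frac == 0 has elements == 3 and dmul == 2, so the first slot marks
 * comps 0xf (num_comps == 4) and comps_slot2 becomes 2, meaning the second
 * slot marks comps 0x3.  This matches the ARB_enhanced_layouts packing rules
 * mentioned in the assert above.
 */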
333
334 struct varying_loc
335 {
336 uint8_t component;
337 uint32_t location;
338 };
339
340 static void
341 mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
342 uint64_t slots_used_mask, unsigned num_slots)
343 {
344 unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
345
346 slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
347 BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
348 }
349
350 static void
351 mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
352 {
353 unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
354
355 slots_used[var->data.patch ? 1 : 0] |=
356 BITFIELD64_BIT(var->data.location - loc_offset + offset);
357 }
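
/* For example, a per-patch output three slots above VARYING_SLOT_PATCH0 sets
 * bit 3 of slots_used[1], while a regular output at VARYING_SLOT_VAR0 sets bit
 * VARYING_SLOT_VAR0 of slots_used[0].  Element 0 therefore matches the layout
 * of shader_info::outputs_written and element 1 that of patch_outputs_written.
 */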
358
359 static void
360 remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
361 struct varying_loc (*remap)[4],
362 uint64_t *slots_used, uint64_t *out_slots_read,
363 uint32_t *p_slots_used, uint32_t *p_out_slots_read)
364 {
365 uint64_t out_slots_read_tmp[2] = {0};
366 uint64_t slots_used_tmp[2] = {0};
367
368 /* We don't touch builtins so just copy the bitmask */
369 slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);
370
371 nir_foreach_variable(var, var_list) {
372 assert(var->data.location >= 0);
373
374 /* Only remap things that aren't built-ins */
375 if (var->data.location >= VARYING_SLOT_VAR0 &&
376 var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
377
378 const struct glsl_type *type = var->type;
379 if (nir_is_per_vertex_io(var, stage)) {
380 assert(glsl_type_is_array(type));
381 type = glsl_get_array_element(type);
382 }
383
384 unsigned num_slots = glsl_count_attribute_slots(type, false);
385 bool used_across_stages = false;
386 bool outputs_read = false;
387
388 unsigned location = var->data.location - VARYING_SLOT_VAR0;
389 struct varying_loc *new_loc = &remap[location][var->data.location_frac];
390
391 unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
392 uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
393 uint64_t outs_used =
394 var->data.patch ? *p_out_slots_read : *out_slots_read;
395 uint64_t slots =
396 BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
397
398 if (slots & used)
399 used_across_stages = true;
400
401 if (slots & outs_used)
402 outputs_read = true;
403
404 if (new_loc->location) {
405 var->data.location = new_loc->location;
406 var->data.location_frac = new_loc->component;
407 }
408
409 if (var->data.always_active_io) {
410 /* We can't apply link time optimisations (specifically array
411 * splitting) to these, so we need to copy the existing mask;
412 * otherwise we will mess up the mask for things like partially
413 * marked arrays.
414 */
415 if (used_across_stages)
416 mark_all_used_slots(var, slots_used_tmp, used, num_slots);
417
418 if (outputs_read) {
419 mark_all_used_slots(var, out_slots_read_tmp, outs_used,
420 num_slots);
421 }
422 } else {
423 for (unsigned i = 0; i < num_slots; i++) {
424 if (used_across_stages)
425 mark_used_slot(var, slots_used_tmp, i);
426
427 if (outputs_read)
428 mark_used_slot(var, out_slots_read_tmp, i);
429 }
430 }
431 }
432 }
433
434 *slots_used = slots_used_tmp[0];
435 *out_slots_read = out_slots_read_tmp[0];
436 *p_slots_used = slots_used_tmp[1];
437 *p_out_slots_read = out_slots_read_tmp[1];
438 }
439
440 struct varying_component {
441 nir_variable *var;
442 uint8_t interp_type;
443 uint8_t interp_loc;
444 bool is_32bit;
445 bool is_patch;
446 bool is_intra_stage_only;
447 bool initialised;
448 };
449
450 static int
451 cmp_varying_component(const void *comp1_v, const void *comp2_v)
452 {
453 struct varying_component *comp1 = (struct varying_component *) comp1_v;
454 struct varying_component *comp2 = (struct varying_component *) comp2_v;
455
456 /* We want patches to be ordered at the end of the array */
457 if (comp1->is_patch != comp2->is_patch)
458 return comp1->is_patch ? 1 : -1;
459
460 /* We want to try to group together TCS outputs that are only read by other
461 * TCS invocations and not consumed by the following stage.
462 */
463 if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
464 return comp1->is_intra_stage_only ? 1 : -1;
465
466 /* We can only pack varyings with matching interpolation types so group
467 * them together.
468 */
469 if (comp1->interp_type != comp2->interp_type)
470 return comp1->interp_type - comp2->interp_type;
471
472 /* The interpolation location must match as well. */
473 if (comp1->interp_loc != comp2->interp_loc)
474 return comp1->interp_loc - comp2->interp_loc;
475
476 /* If everything else matches just use the original location to sort */
477 return comp1->var->data.location - comp2->var->data.location;
478 }
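
/* With this comparator, qsort() roughly yields:
 *
 *    [cross-stage comps grouped by interpolation] [TCS-only comps]
 *    [patch comps] [TCS-only patch comps]
 *
 * so each group ends up packed into its own run of slots.
 */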
479
480 static void
481 gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
482 struct varying_component **varying_comp_info,
483 unsigned *varying_comp_info_size,
484 bool default_to_smooth_interp)
485 {
486 unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
487 unsigned num_of_comps_to_pack = 0;
488
489 /* Count the number of varyings that can be packed and create a mapping
490 * of those varyings to the array we will pass to qsort.
491 */
492 nir_foreach_variable(var, &producer->outputs) {
493
494 /* Only remap things that aren't builtins. */
495 if (var->data.location >= VARYING_SLOT_VAR0 &&
496 var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
497
498 /* We can't repack xfb varyings. */
499 if (var->data.always_active_io)
500 continue;
501
502 const struct glsl_type *type = var->type;
503 if (nir_is_per_vertex_io(var, producer->info.stage)) {
504 assert(glsl_type_is_array(type));
505 type = glsl_get_array_element(type);
506 }
507
508 if (!is_packing_supported_for_type(type))
509 continue;
510
511 unsigned loc = var->data.location - VARYING_SLOT_VAR0;
512 store_varying_info_idx[loc][var->data.location_frac] =
513 ++num_of_comps_to_pack;
514 }
515 }
516
517 *varying_comp_info_size = num_of_comps_to_pack;
518 *varying_comp_info = rzalloc_array(NULL, struct varying_component,
519 num_of_comps_to_pack);
520
521 nir_function_impl *impl = nir_shader_get_entrypoint(consumer);
522
523 /* Walk over the shader and populate the varying component info array */
524 nir_foreach_block(block, impl) {
525 nir_foreach_instr(instr, block) {
526 if (instr->type != nir_instr_type_intrinsic)
527 continue;
528
529 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
530 if (intr->intrinsic != nir_intrinsic_load_deref &&
531 intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
532 intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
533 intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
534 intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
535 continue;
536
537 nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
538 if (deref->mode != nir_var_shader_in)
539 continue;
540
541 /* We only remap things that aren't builtins. */
542 nir_variable *in_var = nir_deref_instr_get_variable(deref);
543 if (in_var->data.location < VARYING_SLOT_VAR0)
544 continue;
545
546 unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
547 if (location >= MAX_VARYINGS_INCL_PATCH)
548 continue;
549
550 unsigned var_info_idx =
551 store_varying_info_idx[location][in_var->data.location_frac];
552 if (!var_info_idx)
553 continue;
554
555 struct varying_component *vc_info =
556 &(*varying_comp_info)[var_info_idx-1];
557
558 if (!vc_info->initialised) {
559 const struct glsl_type *type = in_var->type;
560 if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
561 assert(glsl_type_is_array(type));
562 type = glsl_get_array_element(type);
563 }
564
565 vc_info->var = in_var;
566 vc_info->interp_type =
567 get_interp_type(in_var, type, default_to_smooth_interp);
568 vc_info->interp_loc = get_interp_loc(in_var);
569 vc_info->is_32bit = glsl_type_is_32bit(type);
570 vc_info->is_patch = in_var->data.patch;
571 vc_info->is_intra_stage_only = false;
572 }
573 }
574 }
575
576 /* Walk over the shader and populate the varying component info array
577 * for varyings which are read by other TCS invocations but are not consumed
578 * by the TES.
579 */
580 if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
581 impl = nir_shader_get_entrypoint(producer);
582
583 nir_foreach_block(block, impl) {
584 nir_foreach_instr(instr, block) {
585 if (instr->type != nir_instr_type_intrinsic)
586 continue;
587
588 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
589 if (intr->intrinsic != nir_intrinsic_load_deref)
590 continue;
591
592 nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
593 if (deref->mode != nir_var_shader_out)
594 continue;
595
596 /* We only remap things that aren't builtins. */
597 nir_variable *out_var = nir_deref_instr_get_variable(deref);
598 if (out_var->data.location < VARYING_SLOT_VAR0)
599 continue;
600
601 unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
602 if (location >= MAX_VARYINGS_INCL_PATCH)
603 continue;
604
605 unsigned var_info_idx =
606 store_varying_info_idx[location][out_var->data.location_frac];
607 if (!var_info_idx)
608 continue;
609
610 struct varying_component *vc_info =
611 &(*varying_comp_info)[var_info_idx-1];
612
613 if (!vc_info->initialised) {
614 const struct glsl_type *type = out_var->type;
615 if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
616 assert(glsl_type_is_array(type));
617 type = glsl_get_array_element(type);
618 }
619
620 vc_info->var = out_var;
621 vc_info->interp_type =
622 get_interp_type(out_var, type, default_to_smooth_interp);
623 vc_info->interp_loc = get_interp_loc(out_var);
624 vc_info->is_32bit = glsl_type_is_32bit(type);
625 vc_info->is_patch = out_var->data.patch;
626 vc_info->is_intra_stage_only = true;
627 }
628 }
629 }
630 }
631 }
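
/* The second walk above handles the case named in the commit title: TCS
 * outputs that are read back by the TCS itself but have no matching TES
 * input.  They are flagged is_intra_stage_only so cmp_varying_component()
 * sorts them after the cross-stage varyings, and they still receive remap
 * locations like any other packable component.
 */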
632
633 static void
634 assign_remap_locations(struct varying_loc (*remap)[4],
635 struct assigned_comps *assigned_comps,
636 struct varying_component *info,
637 unsigned *cursor, unsigned *comp,
638 unsigned max_location)
639 {
640 unsigned tmp_cursor = *cursor;
641 unsigned tmp_comp = *comp;
642
643 for (; tmp_cursor < max_location; tmp_cursor++) {
644
645 if (assigned_comps[tmp_cursor].comps) {
646 /* We can only pack varyings with matching interpolation types, and
647 * the interpolation location must match as well.
648 * TODO: i965 can handle interpolation locations that don't match,
649 * but the radeonsi nir backend handles everything as vec4s and so
650 * expects this to be the same for all components. We could make this
651 * check driver specific or drop it if NIR ever becomes the only
652 * radeonsi backend.
653 */
654 if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
655 assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
656 tmp_comp = 0;
657 continue;
658 }
659
660 /* We can only pack varyings with matching types, and the current
661 * algorithm only supports packing 32-bit.
662 */
663 if (!assigned_comps[tmp_cursor].is_32bit) {
664 tmp_comp = 0;
665 continue;
666 }
667
668 while (tmp_comp < 4 &&
669 (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
670 tmp_comp++;
671 }
672 }
673
674 if (tmp_comp == 4) {
675 tmp_comp = 0;
676 continue;
677 }
678
679 unsigned location = info->var->data.location - VARYING_SLOT_VAR0;
680
681 /* Once we have assigned a location mark it as used */
682 assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
683 assigned_comps[tmp_cursor].interp_type = info->interp_type;
684 assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
685 assigned_comps[tmp_cursor].is_32bit = info->is_32bit;
686
687 /* Assign remap location */
688 remap[location][info->var->data.location_frac].component = tmp_comp++;
689 remap[location][info->var->data.location_frac].location =
690 tmp_cursor + VARYING_SLOT_VAR0;
691
692 break;
693 }
694
695 *cursor = tmp_cursor;
696 *comp = tmp_comp;
697 }
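
/* For example, if assigned_comps[0].comps == 0x3 (x/y already taken by an
 * unmovable varying with compatible interpolation), the first packable scalar
 * lands in component 2 of VARYING_SLOT_VAR0, the next in component 3, and the
 * one after that moves the cursor on to VARYING_SLOT_VAR0 + 1.
 */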
698
699 /* If there are empty components in the slot, compact the remaining components
700 * as close to component 0 as possible. This will make it easier to fill the
701 * empty components with components from a different slot in a following pass.
702 */
703 static void
704 compact_components(nir_shader *producer, nir_shader *consumer,
705 struct assigned_comps *assigned_comps,
706 bool default_to_smooth_interp)
707 {
708 struct exec_list *input_list = &consumer->inputs;
709 struct exec_list *output_list = &producer->outputs;
710 struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
711 struct varying_component *varying_comp_info;
712 unsigned varying_comp_info_size;
713
714 /* Gather varying component info */
715 gather_varying_component_info(producer, consumer, &varying_comp_info,
716 &varying_comp_info_size,
717 default_to_smooth_interp);
718
719 /* Sort varying components. */
720 qsort(varying_comp_info, varying_comp_info_size,
721 sizeof(struct varying_component), cmp_varying_component);
722
723 unsigned cursor = 0;
724 unsigned comp = 0;
725
726 /* Set the remap array based on the sorted components */
727 for (unsigned i = 0; i < varying_comp_info_size; i++ ) {
728 struct varying_component *info = &varying_comp_info[i];
729
730 assert(info->is_patch || cursor < MAX_VARYING);
731 if (info->is_patch) {
732 /* The list should be sorted with all non-patch inputs first followed
733 * by patch inputs. When we hit our first patch input, we need to
734 * reset the cursor to MAX_VARYING so we put them in the right slot.
735 */
736 if (cursor < MAX_VARYING) {
737 cursor = MAX_VARYING;
738 comp = 0;
739 }
740
741 assign_remap_locations(remap, assigned_comps, info,
742 &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
743 } else {
744 assign_remap_locations(remap, assigned_comps, info,
745 &cursor, &comp, MAX_VARYING);
746
747 /* Check if we failed to assign a remap location. This can happen if,
748 * for example, there are a bunch of unmovable components with
749 * mismatching interpolation types causing us to skip over locations
750 * that would have been useful for packing later components.
751 * The solution is to iterate over the locations again (this should
752 * happen very rarely in practice).
753 */
754 if (cursor == MAX_VARYING) {
755 cursor = 0;
756 comp = 0;
757 assign_remap_locations(remap, assigned_comps, info,
758 &cursor, &comp, MAX_VARYING);
759 }
760 }
761 }
762
763 ralloc_free(varying_comp_info);
764
765 uint64_t zero = 0;
766 uint32_t zero32 = 0;
767 remap_slots_and_components(input_list, consumer->info.stage, remap,
768 &consumer->info.inputs_read, &zero,
769 &consumer->info.patch_inputs_read, &zero32);
770 remap_slots_and_components(output_list, producer->info.stage, remap,
771 &producer->info.outputs_written,
772 &producer->info.outputs_read,
773 &producer->info.patch_outputs_written,
774 &producer->info.patch_outputs_read);
775 }
776
777 /* We assume that this has been called more-or-less directly after
778 * remove_unused_varyings. At this point, all of the varyings that we
779 * aren't going to be using have been completely removed and the
780 * inputs_read and outputs_written fields in nir_shader_info reflect
781 * this. Therefore, the total set of valid slots is the OR of the two
782 * sets of varyings; this accounts for varyings which one side may need
783 * to read/write even if the other doesn't. This can happen if, for
784 * instance, an array is used indirectly from one side causing it to be
785 * unsplittable but directly from the other.
786 */
787 void
788 nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
789 bool default_to_smooth_interp)
790 {
791 assert(producer->info.stage != MESA_SHADER_FRAGMENT);
792 assert(consumer->info.stage != MESA_SHADER_VERTEX);
793
794 struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};
795
796 get_unmoveable_components_masks(&producer->outputs, assigned_comps,
797 producer->info.stage,
798 default_to_smooth_interp);
799 get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
800 consumer->info.stage,
801 default_to_smooth_interp);
802
803 compact_components(producer, consumer, assigned_comps,
804 default_to_smooth_interp);
805 }
806
807 /*
808 * Mark XFB varyings as always_active_io in the consumer so the linking opts
809 * don't touch them.
810 */
811 void
812 nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
813 {
814 nir_variable *input_vars[MAX_VARYING] = { 0 };
815
816 nir_foreach_variable(var, &consumer->inputs) {
817 if (var->data.location >= VARYING_SLOT_VAR0 &&
818 var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
819
820 unsigned location = var->data.location - VARYING_SLOT_VAR0;
821 input_vars[location] = var;
822 }
823 }
824
825 nir_foreach_variable(var, &producer->outputs) {
826 if (var->data.location >= VARYING_SLOT_VAR0 &&
827 var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
828
829 if (!var->data.always_active_io)
830 continue;
831
832 unsigned location = var->data.location - VARYING_SLOT_VAR0;
833 if (input_vars[location]) {
834 input_vars[location]->data.always_active_io = true;
835 }
836 }
837 }
838 }
839
840 static bool
841 does_varying_match(nir_variable *out_var, nir_variable *in_var)
842 {
843 return in_var->data.location == out_var->data.location &&
844 in_var->data.location_frac == out_var->data.location_frac;
845 }
846
847 static nir_variable *
848 get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
849 {
850 nir_foreach_variable(var, &consumer->inputs) {
851 if (does_varying_match(out_var, var))
852 return var;
853 }
854
855 return NULL;
856 }
857
858 static bool
859 can_replace_varying(nir_variable *out_var)
860 {
861 /* Skip types that require more complex handling.
862 * TODO: add support for these types.
863 */
864 if (glsl_type_is_array(out_var->type) ||
865 glsl_type_is_dual_slot(out_var->type) ||
866 glsl_type_is_matrix(out_var->type) ||
867 glsl_type_is_struct_or_ifc(out_var->type))
868 return false;
869
870 /* Limit this pass to scalars for now to keep things simple. Most varyings
871 * should have been lowered to scalars at this point anyway.
872 */
873 if (!glsl_type_is_scalar(out_var->type))
874 return false;
875
876 if (out_var->data.location < VARYING_SLOT_VAR0 ||
877 out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
878 return false;
879
880 return true;
881 }
882
883 static bool
884 replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
885 {
886 nir_function_impl *impl = nir_shader_get_entrypoint(shader);
887
888 nir_builder b;
889 nir_builder_init(&b, impl);
890
891 nir_variable *out_var =
892 nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));
893
894 bool progress = false;
895 nir_foreach_block(block, impl) {
896 nir_foreach_instr(instr, block) {
897 if (instr->type != nir_instr_type_intrinsic)
898 continue;
899
900 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
901 if (intr->intrinsic != nir_intrinsic_load_deref)
902 continue;
903
904 nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
905 if (in_deref->mode != nir_var_shader_in)
906 continue;
907
908 nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
909
910 if (!does_varying_match(out_var, in_var))
911 continue;
912
913 b.cursor = nir_before_instr(instr);
914
915 nir_load_const_instr *out_const =
916 nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);
917
918 /* Add new const to replace the input */
919 nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
920 intr->dest.ssa.bit_size,
921 out_const->value);
922
923 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));
924
925 progress = true;
926 }
927 }
928
929 return progress;
930 }
931
932 static bool
933 replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
934 nir_intrinsic_instr *dup_store_intr)
935 {
936 assert(input_var);
937
938 nir_function_impl *impl = nir_shader_get_entrypoint(shader);
939
940 nir_builder b;
941 nir_builder_init(&b, impl);
942
943 nir_variable *dup_out_var =
944 nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));
945
946 bool progress = false;
947 nir_foreach_block(block, impl) {
948 nir_foreach_instr(instr, block) {
949 if (instr->type != nir_instr_type_intrinsic)
950 continue;
951
952 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
953 if (intr->intrinsic != nir_intrinsic_load_deref)
954 continue;
955
956 nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
957 if (in_deref->mode != nir_var_shader_in)
958 continue;
959
960 nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
961
962 if (!does_varying_match(dup_out_var, in_var) ||
963 in_var->data.interpolation != input_var->data.interpolation ||
964 get_interp_loc(in_var) != get_interp_loc(input_var))
965 continue;
966
967 b.cursor = nir_before_instr(instr);
968
969 nir_ssa_def *load = nir_load_var(&b, input_var);
970 nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));
971
972 progress = true;
973 }
974 }
975
976 return progress;
977 }
978
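/* Replace consumer inputs whose value can be deduced at link time.  For
 * example (illustrative names), if the producer's last block contains
 *
 *    store_deref(out_color, load_const(1.0))
 *
 * every load of the matching input in the consumer is rewritten to the
 * constant 1.0; and if two outputs store the same SSA value, loads of the
 * second input are rewritten to loads of the first one.
 */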
979 bool
980 nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
981 {
982 /* TODO: Add support for more shader stage combinations */
983 if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
984 (producer->info.stage != MESA_SHADER_VERTEX &&
985 producer->info.stage != MESA_SHADER_TESS_EVAL))
986 return false;
987
988 bool progress = false;
989
990 nir_function_impl *impl = nir_shader_get_entrypoint(producer);
991
992 struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);
993
994 /* If we find a store in the last block of the producer we can be sure this
995 * is the only possible value for this output.
996 */
997 nir_block *last_block = nir_impl_last_block(impl);
998 nir_foreach_instr_reverse(instr, last_block) {
999 if (instr->type != nir_instr_type_intrinsic)
1000 continue;
1001
1002 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
1003
1004 if (intr->intrinsic != nir_intrinsic_store_deref)
1005 continue;
1006
1007 nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
1008 if (out_deref->mode != nir_var_shader_out)
1009 continue;
1010
1011 nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
1012 if (!can_replace_varying(out_var))
1013 continue;
1014
1015 if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
1016 progress |= replace_constant_input(consumer, intr);
1017 } else {
1018 struct hash_entry *entry =
1019 _mesa_hash_table_search(varying_values, intr->src[1].ssa);
1020 if (entry) {
1021 progress |= replace_duplicate_input(consumer,
1022 (nir_variable *) entry->data,
1023 intr);
1024 } else {
1025 nir_variable *in_var = get_matching_input_var(consumer, out_var);
1026 if (in_var) {
1027 _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
1028 in_var);
1029 }
1030 }
1031 }
1032 }
1033
1034 _mesa_hash_table_destroy(varying_values, NULL);
1035
1036 return progress;
1037 }
1038
1039 /* TODO any better helper somewhere to sort a list? */
1040
1041 static void
1042 insert_sorted(struct exec_list *var_list, nir_variable *new_var)
1043 {
1044 nir_foreach_variable(var, var_list) {
1045 if (var->data.location > new_var->data.location) {
1046 exec_node_insert_node_before(&var->node, &new_var->node);
1047 return;
1048 }
1049 }
1050 exec_list_push_tail(var_list, &new_var->node);
1051 }
1052
1053 static void
1054 sort_varyings(struct exec_list *var_list)
1055 {
1056 struct exec_list new_list;
1057 exec_list_make_empty(&new_list);
1058 nir_foreach_variable_safe(var, var_list) {
1059 exec_node_remove(&var->node);
1060 insert_sorted(&new_list, var);
1061 }
1062 exec_list_move_nodes_to(&new_list, var_list);
1063 }
1064
1065 void
1066 nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
1067 gl_shader_stage stage)
1068 {
1069 unsigned location = 0;
1070 unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
1071 uint64_t processed_locs[2] = {0};
1072
1073 sort_varyings(var_list);
1074
1075 int UNUSED last_loc = 0;
1076 bool last_partial = false;
1077 nir_foreach_variable(var, var_list) {
1078 const struct glsl_type *type = var->type;
1079 if (nir_is_per_vertex_io(var, stage)) {
1080 assert(glsl_type_is_array(type));
1081 type = glsl_get_array_element(type);
1082 }
1083
1084 int base;
1085 if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
1086 base = VERT_ATTRIB_GENERIC0;
1087 else if (var->data.mode == nir_var_shader_out &&
1088 stage == MESA_SHADER_FRAGMENT)
1089 base = FRAG_RESULT_DATA0;
1090 else
1091 base = VARYING_SLOT_VAR0;
1092
1093 unsigned var_size;
1094 if (var->data.compact) {
1095 /* compact variables must be arrays of scalars */
1096 assert(glsl_type_is_array(type));
1097 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1098 unsigned start = 4 * location + var->data.location_frac;
1099 unsigned end = start + glsl_get_length(type);
1100 var_size = end / 4 - location;
1101 last_partial = end % 4 != 0;
1102 } else {
1103 /* Compact variables bypass the normal varying compacting pass,
1104 * which means they cannot be in the same vec4 slot as a normal
1105 * variable. If part of the current slot is taken up by a compact
1106 * variable, we need to go to the next one.
1107 */
1108 if (last_partial) {
1109 location++;
1110 last_partial = false;
1111 }
1112 var_size = glsl_count_attribute_slots(type, false);
1113 }
1114
1115 /* Builtins don't allow component packing so we only need to worry about
1116 * user defined varyings sharing the same location.
1117 */
1118 bool processed = false;
1119 if (var->data.location >= base) {
1120 unsigned glsl_location = var->data.location - base;
1121
1122 for (unsigned i = 0; i < var_size; i++) {
1123 if (processed_locs[var->data.index] &
1124 ((uint64_t)1 << (glsl_location + i)))
1125 processed = true;
1126 else
1127 processed_locs[var->data.index] |=
1128 ((uint64_t)1 << (glsl_location + i));
1129 }
1130 }
1131
1132 /* Because component packing allows varyings to share the same location,
1133 * we may have already processed this location.
1134 */
1135 if (processed) {
1136 unsigned driver_location = assigned_locations[var->data.location];
1137 var->data.driver_location = driver_location;
1138
1139 /* An array may be packed such that it crosses multiple other arrays
1140 * or variables, so we need to make sure we have allocated the elements
1141 * consecutively if the previously processed var was shorter than
1142 * the current array we are processing.
1143 *
1144 * NOTE: The code below assumes the var list is ordered in ascending
1145 * location order.
1146 */
1147 assert(last_loc <= var->data.location);
1148 last_loc = var->data.location;
1149 unsigned last_slot_location = driver_location + var_size;
1150 if (last_slot_location > location) {
1151 unsigned num_unallocated_slots = last_slot_location - location;
1152 unsigned first_unallocated_slot = var_size - num_unallocated_slots;
1153 for (unsigned i = first_unallocated_slot; i < var_size; i++) {
1154 assigned_locations[var->data.location + i] = location;
1155 location++;
1156 }
1157 }
1158 continue;
1159 }
1160
1161 for (unsigned i = 0; i < var_size; i++) {
1162 assigned_locations[var->data.location + i] = location + i;
1163 }
1164
1165 var->data.driver_location = location;
1166 location += var_size;
1167 }
1168
1169 if (last_partial)
1170 location++;
1171
1172 *size = location;
1173 }
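
/* Worked example for nir_assign_io_var_locations() (illustrative): given a
 * compact float[5] array (e.g. a lowered clip-distance array) followed by a
 * vec4 at VARYING_SLOT_VAR0, the compact array gets driver_location 0, its
 * fifth component spills into slot 1 (last_partial is set), the vec4 is then
 * bumped to driver_location 2, and *size ends up as 3.
 */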
1174