/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR.  Eventually, we'll probably want a full-blown varying packing
 * implementation in here.  Right now, it just deletes unused things.
 */

/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}

static uint8_t
get_num_components(nir_variable *var)
{
   if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return 4;

   return glsl_get_vector_elements(glsl_without_array(var->type));
}

static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            for (unsigned i = 0; i < get_num_components(var); i++) {
               if (var->data.patch) {
                  patches_read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               } else {
                  read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               }
            }
         }
      }
   }
}

/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead-code eliminated). Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
 * representing each .location_frac used.  Note that for vector variables,
 * only the first channel (.location_frac) is examined for deciding if the
 * variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader,
                          nir_variable_mode mode,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   assert(mode == nir_var_shader_in || mode == nir_var_shader_out);
   struct exec_list *var_list =
      mode == nir_var_shader_in ? &shader->inputs : &shader->outputs;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}

bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_shader_out_variable(var, producer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         } else {
            written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         }
      }
   }

   nir_foreach_shader_in_variable(var, consumer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         } else {
            read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         }
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written,
                                        patches_written) || progress;

   return progress;
}

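/* Illustrative call sequence (assumed driver code, not part of this file):
 * a linker would typically scalarize I/O first, then run
 * nir_remove_unused_varyings(producer, consumer) and, on progress, re-run
 * dead-code elimination on both shaders so that the variables demoted to
 * nir_var_shader_temp are actually deleted.
 */
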
static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}

static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bit sizes
    * other than 32-bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}

struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};

/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(struct exec_list *var_list,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit =
               glsl_type_is_32bit(glsl_without_array(type));
         }
      }
   }
}

struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}

static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}

static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc =
            &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}

struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool is_intra_stage_only;
   bool initialised;
};

static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We want to try to group together TCS outputs that are only read by other
    * TCS invocations and not consumed by the following stage.
    */
   if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
      return comp1->is_intra_stage_only ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}

static void
gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_shader_out_variable(var, producer) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, producer->info.stage) ||
             var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx - 1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
                in_var->data.per_view) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
            vc_info->is_intra_stage_only = false;
            vc_info->initialised = true;
         }
      }
   }

   /* Walk over the shader and populate the varying component info array
    * for varyings which are read by other TCS instances but are not consumed
    * by the TES.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
      impl = nir_shader_get_entrypoint(producer);

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            /* We only remap things that aren't builtins. */
            nir_variable *out_var = nir_deref_instr_get_variable(deref);
            if (out_var->data.location < VARYING_SLOT_VAR0)
               continue;

            unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
            if (location >= MAX_VARYINGS_INCL_PATCH)
               continue;

            unsigned var_info_idx =
               store_varying_info_idx[location][out_var->data.location_frac];
            if (!var_info_idx) {
               /* Something went wrong, the shader interfaces didn't match, so
                * abandon packing. This can happen for example when the
                * inputs are scalars but the outputs are struct members.
                */
               *varying_comp_info_size = 0;
               break;
            }

            struct varying_component *vc_info =
               &(*varying_comp_info)[var_info_idx - 1];

            if (!vc_info->initialised) {
               const struct glsl_type *type = out_var->type;
               if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
                  assert(glsl_type_is_array(type));
                  type = glsl_get_array_element(type);
               }

               vc_info->var = out_var;
               vc_info->interp_type =
                  get_interp_type(out_var, type, default_to_smooth_interp);
               vc_info->interp_loc = get_interp_loc(out_var);
               vc_info->is_32bit = glsl_type_is_32bit(type);
               vc_info->is_patch = out_var->data.patch;
               vc_info->is_intra_stage_only = true;
               vc_info->initialised = true;
            }
         }
      }
   }

   for (unsigned i = 0; i < *varying_comp_info_size; i++) {
      struct varying_component *vc_info = &(*varying_comp_info)[i];
      if (!vc_info->initialised) {
         /* Something went wrong, the shader interfaces didn't match, so
          * abandon packing. This can happen for example when the outputs are
          * scalars but the inputs are struct members.
          */
         *varying_comp_info_size = 0;
         break;
      }
   }
}

static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}

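/* Minimal worked example (assumed shader, not from this file): two float
 * varyings at VARYING_SLOT_VAR1.x and VARYING_SLOT_VAR3.x with identical
 * interpolation type and location both land in VARYING_SLOT_VAR0, as
 * components 0 and 1, freeing their original slots for other components.
 */
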
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(producer, consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs.  When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings.  At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this.  Therefore, the total set of valid slots is the OR of the two
 * sets of varyings;  this accounts for varyings which one side may need
 * to read/write even if the other doesn't.  This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};

   get_unmoveable_components_masks(&producer->outputs, assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}

/*
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_shader_in_variable(var, consumer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_shader_out_variable(var, producer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}

static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}

static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_shader_in_variable(var, consumer) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}

static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}

static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}

static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}

bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}

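/* Illustrative effect (assumed GLSL, not part of this file): if the last
 * block of the vertex shader contains "out_val = 1.0;", every fragment
 * shader load of the matching input is rewritten to that constant; and if
 * two scalar outputs store the same SSA value, loads of the duplicate input
 * are rewritten to load the surviving input variable instead.
 */
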
/* TODO any better helper somewhere to sort a list? */

static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}

void
nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   sort_varyings(var_list);

   int UNUSED last_loc = 0;
   bool last_partial = false;
   nir_foreach_variable(var, var_list) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned base;
      if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
         base = VERT_ATTRIB_GENERIC0;
      else if (var->data.mode == nir_var_shader_out &&
               stage == MESA_SHADER_FRAGMENT)
         base = FRAG_RESULT_DATA0;
      else
         base = VARYING_SLOT_VAR0;

      unsigned var_size;
      if (var->data.compact) {
         /* If we are inside a partial compact,
          * don't allow another compact to be in this slot
          * if it starts at component 0.
          */
         if (last_partial && var->data.location_frac == 0) {
            location++;
         }

         /* compact variables must be arrays of scalars */
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
         unsigned start = 4 * location + var->data.location_frac;
         unsigned end = start + glsl_get_length(type);
         var_size = end / 4 - location;
         last_partial = end % 4 != 0;
      } else {
         /* Compact variables bypass the normal varying compacting pass,
          * which means they cannot be in the same vec4 slot as a normal
          * variable. If part of the current slot is taken up by a compact
          * variable, we need to go to the next one.
          */
         if (last_partial)
            location++;
         last_partial = false;
         var_size = glsl_count_attribute_slots(type, false);
      }

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;

         /* An array may be packed such that it crosses multiple other arrays
          * or variables, we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   *size = location;
}

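/* Worked example of the compact-variable math above (assumed shader): a
 * compact float[5] with location_frac 0 gives start = 4 * location and
 * end = start + 5, so var_size = end / 4 - location = 1 and
 * last_partial = true; the partially filled tail slot is then skipped
 * before the next non-compact variable is placed.
 */
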
static uint64_t
get_linked_variable_location(unsigned location, bool patch)
{
   if (!patch)
      return location;

   /* Reserve locations 0...3 for special patch variables
    * like tess factors and bounding boxes, and the generic patch
    * variables will come after them.
    */
   if (location >= VARYING_SLOT_PATCH0)
      return location - VARYING_SLOT_PATCH0 + 4;
   else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER &&
            location <= VARYING_SLOT_BOUNDING_BOX1)
      return location - VARYING_SLOT_TESS_LEVEL_OUTER;
   else
      unreachable("Unsupported variable in get_linked_variable_location.");
}

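/* Illustrative mapping (assuming the gl_varying_slot enum order, tess
 * levels followed by bounding boxes): VARYING_SLOT_TESS_LEVEL_OUTER maps to
 * linked location 0, VARYING_SLOT_TESS_LEVEL_INNER to 1, the bounding-box
 * slots to 2-3, and VARYING_SLOT_PATCH0 + n to 4 + n.
 */
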
static uint64_t
get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
{
   const struct glsl_type *type = variable->type;

   if (nir_is_per_vertex_io(variable, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   if (variable->data.compact) {
      unsigned component_count =
         variable->data.location_frac + glsl_get_length(type);
      slots = DIV_ROUND_UP(component_count, 4);
   }

   uint64_t mask = u_bit_consecutive64(0, slots);
   return mask;
}

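/* Worked example (assumed input): a compact array of length 6 with
 * location_frac 2 gives component_count = 8, so
 * slots = DIV_ROUND_UP(8, 4) = 2 and the resulting mask is 0x3 (two
 * consecutive slot bits).
 */
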
nir_linked_io_var_info
nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer)
{
   assert(producer);
   assert(consumer);

   uint64_t producer_output_mask = 0;
   uint64_t producer_patch_output_mask = 0;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         producer_patch_output_mask |= mask << loc;
      else
         producer_output_mask |= mask << loc;
   }

   uint64_t consumer_input_mask = 0;
   uint64_t consumer_patch_input_mask = 0;

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         consumer_patch_input_mask |= mask << loc;
      else
         consumer_input_mask |= mask << loc;
   }

   uint64_t io_mask = producer_output_mask | consumer_input_mask;
   uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_linked_io_var_info result = {
      .num_linked_io_vars = util_bitcount64(io_mask),
      .num_linked_patch_io_vars = util_bitcount64(patch_io_mask),
   };

   return result;
}