/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */

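/* Illustrative usage sketch (not part of the original file): a driver's link
 * step might call the public helpers below roughly in this order, where "vs"
 * and "fs" are hypothetical producer/consumer shaders of a VS->FS pipeline:
 *
 *    nir_remove_unused_varyings(vs, fs);
 *    nir_compact_varyings(vs, fs, true);
 *    nir_link_opt_varyings(vs, fs);
 *
 * The exact set and order of calls is driver specific.
 */
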
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}

static uint8_t
get_num_components(nir_variable *var)
{
   if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return 4;

   return glsl_get_vector_elements(glsl_without_array(var->type));
}

static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            for (unsigned i = 0; i < get_num_components(var); i++) {
               if (var->data.patch) {
                  patches_read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               } else {
                  read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               }
            }
         }
      }
   }
}

/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead-code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out,
 *                                      read, patches_read) ||
 *            progress;
 *
 * The "used" masks should be arrays of 4 uint64_ts (probably of
 * VARYING_BIT_*) representing each .location_frac used. Note that for
 * vector variables, only the first channel (.location_frac) is examined
 * for deciding if the variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader,
                          nir_variable_mode mode,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   assert(mode == nir_var_shader_in || mode == nir_var_shader_out);

   nir_foreach_variable_with_modes_safe(var, shader, mode) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}

bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_shader_out_variable(var, producer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         } else {
            written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         }
      }
   }

   nir_foreach_shader_in_variable(var, consumer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         } else {
            read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         }
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written,
                                        patches_written) || progress;

   return progress;
}

static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}

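/* For example (illustrative): a GLSL "centroid in float v" consumer input has
 * var->data.centroid set and so classifies as INTERPOLATE_LOC_CENTROID, while
 * a "flat in int i" input classifies as INTERP_MODE_FLAT via get_interp_type()
 * (integers must be flat shaded). These two keys decide which components are
 * allowed to share a packed slot.
 */
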
static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bit sizes
    * other than 32-bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}

struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};

/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(nir_shader *shader,
                                nir_variable_mode mode,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_with_modes_safe(var, shader, mode) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit =
               glsl_type_is_32bit(glsl_without_array(type));
         }
      }
   }
}

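/* Worked example (illustrative): for an unpackable dvec3 at location_frac 0,
 * dmul is 2 and elements is 3, so 6 components span 2 slots. On the first
 * (even) slot num_comps = 4 and comps_slot2 = 2, marking xyzw of slot 0 used;
 * on the second (odd) slot the mask (1 << 2) - 1 marks xy of slot 1 used.
 */
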
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}

static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}

static void
remap_slots_and_components(nir_shader *shader, nir_variable_mode mode,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   const gl_shader_stage stage = shader->info.stage;
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable_with_modes(var, shader, mode) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}

struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool is_intra_stage_only;
   bool initialised;
};

static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We want to try to group together TCS outputs that are only read by other
    * TCS invocations and not consumed by the following stage.
    */
   if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
      return comp1->is_intra_stage_only ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}

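/* Illustrative resulting order from the comparator above: patch components
 * sort after all non-patch components; within each group, intra-stage-only
 * TCS components sort last, and the rest are clustered by interpolation
 * type, then interpolation location, then original location, so that
 * assign_remap_locations() can fill slots front to back without mixing
 * incompatible components.
 */
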
static void
gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_shader_out_variable(var, producer) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx-1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
                in_var->data.per_view) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
            vc_info->is_intra_stage_only = false;
            vc_info->initialised = true;
         }
      }
   }

   /* Walk over the shader and populate the varying component info array
    * for varyings which are read by other TCS instances but are not consumed
    * by the TES.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
      impl = nir_shader_get_entrypoint(producer);

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            /* We only remap things that aren't builtins. */
            nir_variable *out_var = nir_deref_instr_get_variable(deref);
            if (out_var->data.location < VARYING_SLOT_VAR0)
               continue;

            unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
            if (location >= MAX_VARYINGS_INCL_PATCH)
               continue;

            unsigned var_info_idx =
               store_varying_info_idx[location][out_var->data.location_frac];
            if (!var_info_idx) {
               /* Something went wrong, the shader interfaces didn't match, so
                * abandon packing. This can happen for example when the
                * inputs are scalars but the outputs are struct members.
                */
               *varying_comp_info_size = 0;
               break;
            }

            struct varying_component *vc_info =
               &(*varying_comp_info)[var_info_idx-1];

            if (!vc_info->initialised) {
               const struct glsl_type *type = out_var->type;
               if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
                  assert(glsl_type_is_array(type));
                  type = glsl_get_array_element(type);
               }

               vc_info->var = out_var;
               vc_info->interp_type =
                  get_interp_type(out_var, type, default_to_smooth_interp);
               vc_info->interp_loc = get_interp_loc(out_var);
               vc_info->is_32bit = glsl_type_is_32bit(type);
               vc_info->is_patch = out_var->data.patch;
               vc_info->is_intra_stage_only = true;
               vc_info->initialised = true;
            }
         }
      }
   }

   for (unsigned i = 0; i < *varying_comp_info_size; i++) {
      struct varying_component *vc_info = &(*varying_comp_info)[i];
      if (!vc_info->initialised) {
         /* Something went wrong, the shader interfaces didn't match, so
          * abandon packing. This can happen for example when the outputs are
          * scalars but the inputs are struct members.
          */
         *varying_comp_info_size = 0;
         break;
      }
   }
}

static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}

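/* Illustrative walk-through: with an empty assigned_comps array the first
 * sorted component lands at cursor 0, component x; a compatible follower
 * packs into component y of the same slot, and so on up to w. A component
 * whose interpolation type or location mismatches the slot's existing
 * contents skips ahead to the next slot that can take it.
 */
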
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(producer, consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs. When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(consumer, nir_var_shader_in, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(producer, nir_var_shader_out, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};

   get_unmoveable_components_masks(producer, nir_var_shader_out,
                                   assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(consumer, nir_var_shader_in,
                                   assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}

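/* Illustrative effect (hypothetical shaders): two matching scalar float
 * varyings at VARYING_SLOT_VAR1.x and VARYING_SLOT_VAR2.x can end up packed
 * as VARYING_SLOT_VAR0.x and VARYING_SLOT_VAR0.y in both stages, freeing the
 * original slots. The exact placement depends on the unmovable-component
 * masks gathered above.
 */
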
/*
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_shader_in_variable(var, consumer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_shader_out_variable(var, producer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}

static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}

static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_shader_in_variable(var, consumer) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}

static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}

static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}

static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}

bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}

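/* Illustrative case (not from the original source): if the producer's last
 * block contains
 *
 *    store_deref(out_alpha_deref, load_const(1.0))
 *
 * then loads of the matching fragment input are rewritten to that constant;
 * and when two outputs store the same SSA value, loads of the duplicate
 * input are rewritten to loads of the input recorded first.
 */
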
/* TODO: any better helper somewhere to sort a list? */

static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable_in_list(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(nir_shader *shader, nir_variable_mode mode,
              struct exec_list *sorted_list)
{
   exec_list_make_empty(sorted_list);
   nir_foreach_variable_with_modes_safe(var, shader, mode) {
      exec_node_remove(&var->node);
      insert_sorted(sorted_list, var);
   }
}

void
nir_assign_io_var_locations(nir_shader *shader, nir_variable_mode mode,
                            unsigned *size, gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   struct exec_list io_vars;
   sort_varyings(shader, mode, &io_vars);

   int UNUSED last_loc = 0;
   bool last_partial = false;
   nir_foreach_variable_in_list(var, &io_vars) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      int base;
      if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
         base = VERT_ATTRIB_GENERIC0;
      else if (var->data.mode == nir_var_shader_out &&
               stage == MESA_SHADER_FRAGMENT)
         base = FRAG_RESULT_DATA0;
      else
         base = VARYING_SLOT_VAR0;

      unsigned var_size;
      if (var->data.compact) {
         /* If we are inside a partial compact,
          * don't allow another compact to be in this slot
          * if it starts at component 0.
          */
         if (last_partial && var->data.location_frac == 0) {
            location++;
         }

         /* compact variables must be arrays of scalars */
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
         unsigned start = 4 * location + var->data.location_frac;
         unsigned end = start + glsl_get_length(type);
         var_size = end / 4 - location;
         last_partial = end % 4 != 0;
      } else {
         /* Compact variables bypass the normal varying compacting pass,
          * which means they cannot be in the same vec4 slot as a normal
          * variable. If part of the current slot is taken up by a compact
          * variable, we need to go to the next one.
          */
         if (last_partial) {
            location++;
            last_partial = false;
         }
         var_size = glsl_count_attribute_slots(type, false);
      }

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;

         /* An array may be packed such that it crosses multiple other arrays
          * or variables; we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   if (last_partial)
      location++;

   exec_list_append(&shader->variables, &io_vars);
   *size = location;
}

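/* Illustrative compact-variable case (hypothetical float[5] clip-distance
 * style array at location_frac 0): end - start is 5 components, var_size
 * works out to end / 4 - location = 1 full slot, and last_partial is set
 * because the fifth component spills into the next slot; that partial slot
 * is paid for either by the next variable or by the final
 * "if (last_partial) location++".
 */
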
static uint64_t
get_linked_variable_location(unsigned location, bool patch)
{
   if (!patch)
      return location;

   /* Reserve locations 0...3 for special patch variables
    * like tess factors and bounding boxes, and the generic patch
    * variables will come after them.
    */
   if (location >= VARYING_SLOT_PATCH0)
      return location - VARYING_SLOT_PATCH0 + 4;
   else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER &&
            location <= VARYING_SLOT_BOUNDING_BOX1)
      return location - VARYING_SLOT_TESS_LEVEL_OUTER;
   else
      unreachable("Unsupported variable in get_linked_variable_location.");
}

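/* Resulting patch-location layout (for illustration):
 *    VARYING_SLOT_TESS_LEVEL_OUTER -> 0
 *    VARYING_SLOT_TESS_LEVEL_INNER -> 1
 *    VARYING_SLOT_BOUNDING_BOX0/1  -> 2/3
 *    VARYING_SLOT_PATCH0 + n       -> 4 + n
 * Non-patch locations pass through unchanged.
 */
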
static uint64_t
get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
{
   const struct glsl_type *type = variable->type;

   if (nir_is_per_vertex_io(variable, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   if (variable->data.compact) {
      unsigned component_count = variable->data.location_frac + glsl_get_length(type);
      slots = DIV_ROUND_UP(component_count, 4);
   }

   uint64_t mask = u_bit_consecutive64(0, slots);
   return mask;
}

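/* For example (illustrative): a vec4 varying covers one slot (mask 0x1), a
 * mat4 covers four (mask 0xf), and a compact float[6] at location_frac 0
 * covers DIV_ROUND_UP(6, 4) = 2 slots (mask 0x3).
 */
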
nir_linked_io_var_info
nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer)
{
   assert(producer);
   assert(consumer);

   uint64_t producer_output_mask = 0;
   uint64_t producer_patch_output_mask = 0;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         producer_patch_output_mask |= mask << loc;
      else
         producer_output_mask |= mask << loc;
   }

   uint64_t consumer_input_mask = 0;
   uint64_t consumer_patch_input_mask = 0;

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         consumer_patch_input_mask |= mask << loc;
      else
         consumer_input_mask |= mask << loc;
   }

   uint64_t io_mask = producer_output_mask | consumer_input_mask;
   uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_linked_io_var_info result = {
      .num_linked_io_vars = util_bitcount64(io_mask),
      .num_linked_patch_io_vars = util_bitcount64(patch_io_mask),
   };

   return result;
}