/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */

/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
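
/* Worked example (illustrative only): a non-patch varying that occupies two
 * attribute slots at VARYING_SLOT_VAR1 produces
 *
 *    ((1ull << 2) - 1) << VARYING_SLOT_VAR1
 *
 * i.e. the bits for VARYING_SLOT_VAR1 and VARYING_SLOT_VAR2, matching the
 * encoding of shader_info::inputs_read / outputs_written.
 */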

static unsigned
get_num_components(nir_variable *var)
{
   if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return 4;

   return glsl_get_vector_elements(glsl_without_array(var->type));
}

static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            for (unsigned i = 0; i < get_num_components(var); i++) {
               if (var->data.patch) {
                  patches_read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               } else {
                  read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               }
            }
         }
      }
   }
}

/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer, nir_var_shader_out,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
 * representing each .location_frac used. Note that for vector variables,
 * only the first channel (.location_frac) is examined for deciding if the
 * variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader,
                          nir_variable_mode mode,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   assert(mode == nir_var_shader_in || mode == nir_var_shader_out);
   struct exec_list *var_list = nir_variable_list_for_mode(shader, mode);

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}

bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_shader_out_variable(var, producer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         } else {
            written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         }
      }
   }

   nir_foreach_shader_in_variable(var, consumer) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         } else {
            read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         }
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, nir_var_shader_out, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, nir_var_shader_in, written,
                                        patches_written) || progress;

   return progress;
}
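
/* Rough sketch of how a linker might chain these helpers for one
 * producer/consumer pair (hypothetical call site, not part of this file;
 * the exact ordering and surrounding cleanup passes vary per driver):
 *
 *    if (nir_remove_unused_varyings(producer, consumer)) {
 *       // re-run dead-code elimination on both shaders so the variables
 *       // demoted to nir_var_shader_temp actually disappear
 *    }
 *    nir_compact_varyings(producer, consumer, default_to_smooth_interp);
 */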

static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}

static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bit sizes
    * other than 32-bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}

struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};

/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(struct exec_list *var_list,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit =
               glsl_type_is_32bit(glsl_without_array(type));
         }
      }
   }
}

struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}

static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}

static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}

struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool is_intra_stage_only;
   bool initialised;
};

static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We want to try to group together TCS outputs that are only read by other
    * TCS invocations and not consumed by the following stage.
    */
   if (comp1->is_intra_stage_only != comp2->is_intra_stage_only)
      return comp1->is_intra_stage_only ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}
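
/* Resulting order, for illustration: all non-patch components come before all
 * patch components; within each group, components consumed by the next stage
 * come before intra-stage-only TCS outputs; and within those, components are
 * grouped by interpolation type and location so qsort() places
 * pack-compatible components next to each other.
 */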

static void
gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_shader_out_variable(var, producer) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx-1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
                in_var->data.per_view) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
            vc_info->is_intra_stage_only = false;
            vc_info->initialised = true;
         }
      }
   }

   /* Walk over the shader and populate the varying component info array
    * for varyings which are read by other TCS instances but are not consumed
    * by the TES.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL) {
      impl = nir_shader_get_entrypoint(producer);

      nir_foreach_block(block, impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
            if (intr->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            /* We only remap things that aren't builtins. */
            nir_variable *out_var = nir_deref_instr_get_variable(deref);
            if (out_var->data.location < VARYING_SLOT_VAR0)
               continue;

            unsigned location = out_var->data.location - VARYING_SLOT_VAR0;
            if (location >= MAX_VARYINGS_INCL_PATCH)
               continue;

            unsigned var_info_idx =
               store_varying_info_idx[location][out_var->data.location_frac];
            if (!var_info_idx) {
               /* Something went wrong, the shader interfaces didn't match, so
                * abandon packing. This can happen for example when the
                * inputs are scalars but the outputs are struct members.
                */
               *varying_comp_info_size = 0;
               break;
            }

            struct varying_component *vc_info =
               &(*varying_comp_info)[var_info_idx-1];

            if (!vc_info->initialised) {
               const struct glsl_type *type = out_var->type;
               if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
                  assert(glsl_type_is_array(type));
                  type = glsl_get_array_element(type);
               }

               vc_info->var = out_var;
               vc_info->interp_type =
                  get_interp_type(out_var, type, default_to_smooth_interp);
               vc_info->interp_loc = get_interp_loc(out_var);
               vc_info->is_32bit = glsl_type_is_32bit(type);
               vc_info->is_patch = out_var->data.patch;
               vc_info->is_intra_stage_only = true;
               vc_info->initialised = true;
            }
         }
      }
   }

   for (unsigned i = 0; i < *varying_comp_info_size; i++ ) {
      struct varying_component *vc_info = &(*varying_comp_info)[i];
      if (!vc_info->initialised) {
         /* Something went wrong, the shader interfaces didn't match, so
          * abandon packing. This can happen for example when the outputs are
          * scalars but the inputs are struct members.
          */
         *varying_comp_info_size = 0;
         break;
      }
   }
}

static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}
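
/* Illustrative example (assumed inputs, not taken from a real shader): two
 * packable scalar components that originally lived in VAR1.x and VAR3.x and
 * share interpolation settings would typically end up with
 * remap[1][0] = { .component = 0, .location = VARYING_SLOT_VAR0 } and
 * remap[3][0] = { .component = 1, .location = VARYING_SLOT_VAR0 },
 * assuming slot VAR0 has no unmovable components marked in assigned_comps.
 */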

/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(producer, consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++ ) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs.  When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings.  At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this.  Therefore, the total set of valid slots is the OR of the two
 * sets of varyings;  this accounts for varyings which one side may need
 * to read/write even if the other doesn't.  This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};

   get_unmoveable_components_masks(&producer->outputs, assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}

/**
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_shader_in_variable(var, consumer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_shader_out_variable(var, producer) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}

static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}

static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_shader_in_variable(var, consumer) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}

static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}

static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}

static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}

bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
               _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}
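
/* Sketch of the effect on a hypothetical VS->FS pair: if the producer's last
 * block ends with
 *
 *    store_deref(out_var, load_const(1.0))
 *
 * then replace_constant_input() rewrites the matching fragment-shader input
 * load to the immediate 1.0; and if two outputs store the same SSA value,
 * the second varying's FS loads are rewritten by replace_duplicate_input()
 * to re-load the first varying's input variable instead. Later dead-code and
 * unused-varying passes can then drop the now-unused varyings.
 */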

/* TODO any better helper somewhere to sort a list? */

static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}

static void
sort_varyings(nir_shader *shader, nir_variable_mode mode,
              struct exec_list *sorted_list)
{
   exec_list_make_empty(sorted_list);
   nir_foreach_variable_with_modes_safe(var, shader, mode) {
      exec_node_remove(&var->node);
      insert_sorted(sorted_list, var);
   }
}

void
nir_assign_io_var_locations(nir_shader *shader, nir_variable_mode mode,
                            unsigned *size, gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   struct exec_list io_vars;
   sort_varyings(shader, mode, &io_vars);

   int UNUSED last_loc = 0;
   bool last_partial = false;
   nir_foreach_variable(var, &io_vars) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned base;
      if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
         base = VERT_ATTRIB_GENERIC0;
      else if (var->data.mode == nir_var_shader_out &&
               stage == MESA_SHADER_FRAGMENT)
         base = FRAG_RESULT_DATA0;
      else
         base = VARYING_SLOT_VAR0;

      unsigned var_size;
      if (var->data.compact) {
         /* If we are inside a partial compact,
          * don't allow another compact to be in this slot
          * if it starts at component 0.
          */
         if (last_partial && var->data.location_frac == 0) {
            location++;
         }

         /* compact variables must be arrays of scalars */
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
         unsigned start = 4 * location + var->data.location_frac;
         unsigned end = start + glsl_get_length(type);
         var_size = end / 4 - location;
         last_partial = end % 4 != 0;
      } else {
         /* Compact variables bypass the normal varying compacting pass,
          * which means they cannot be in the same vec4 slot as a normal
          * variable. If part of the current slot is taken up by a compact
          * variable, we need to go to the next one.
          */
         if (last_partial)
            location++;
         last_partial = false;

         var_size = glsl_count_attribute_slots(type, false);
      }

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;

         /* An array may be packed such that it crosses multiple other arrays
          * or variables, we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          * location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   if (last_partial)
      location++;

   struct exec_list *var_list = nir_variable_list_for_mode(shader, mode);
   exec_list_append(var_list, &io_vars);
   *size = location;
}

static uint64_t
get_linked_variable_location(unsigned location, bool patch)
{
   if (!patch)
      return location;

   /* Reserve locations 0...3 for special patch variables
    * like tess factors and bounding boxes, and the generic patch
    * variables will come after them.
    */
   if (location >= VARYING_SLOT_PATCH0)
      return location - VARYING_SLOT_PATCH0 + 4;
   else if (location >= VARYING_SLOT_TESS_LEVEL_OUTER &&
            location <= VARYING_SLOT_BOUNDING_BOX1)
      return location - VARYING_SLOT_TESS_LEVEL_OUTER;
   else
      unreachable("Unsupported variable in get_linked_variable_location.");
}
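
/* For example: VARYING_SLOT_TESS_LEVEL_OUTER maps to 0,
 * VARYING_SLOT_TESS_LEVEL_INNER to 1, the bounding-box slots to 2 and 3,
 * and VARYING_SLOT_PATCH0 + n maps to 4 + n.
 */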

static uint64_t
get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
{
   const struct glsl_type *type = variable->type;

   if (nir_is_per_vertex_io(variable, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   if (variable->data.compact) {
      unsigned component_count = variable->data.location_frac + glsl_get_length(type);
      slots = DIV_ROUND_UP(component_count, 4);
   }

   uint64_t mask = u_bit_consecutive64(0, slots);
   return mask;
}

nir_linked_io_var_info
nir_assign_linked_io_var_locations(nir_shader *producer, nir_shader *consumer)
{
   assert(producer);
   assert(consumer);

   uint64_t producer_output_mask = 0;
   uint64_t producer_patch_output_mask = 0;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t mask = get_linked_variable_io_mask(variable, producer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         producer_patch_output_mask |= mask << loc;
      else
         producer_output_mask |= mask << loc;
   }

   uint64_t consumer_input_mask = 0;
   uint64_t consumer_patch_input_mask = 0;

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t mask = get_linked_variable_io_mask(variable, consumer->info.stage);
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         consumer_patch_input_mask |= mask << loc;
      else
         consumer_input_mask |= mask << loc;
   }

   uint64_t io_mask = producer_output_mask | consumer_input_mask;
   uint64_t patch_io_mask = producer_patch_output_mask | consumer_patch_input_mask;

   nir_foreach_shader_out_variable(variable, producer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_foreach_shader_in_variable(variable, consumer) {
      uint64_t loc = get_linked_variable_location(variable->data.location, variable->data.patch);

      if (variable->data.patch)
         variable->data.driver_location = util_bitcount64(patch_io_mask & u_bit_consecutive64(0, loc)) * 4;
      else
         variable->data.driver_location = util_bitcount64(io_mask & u_bit_consecutive64(0, loc)) * 4;
   }

   nir_linked_io_var_info result = {
      .num_linked_io_vars = util_bitcount64(io_mask),
      .num_linked_patch_io_vars = util_bitcount64(patch_io_mask),
   };

   return result;
}