/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"
/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */
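/* Entry points defined below: nir_remove_unused_varyings() drops varyings
 * dead on one side of an interface, nir_compact_varyings() packs the
 * survivors, nir_link_opt_varyings() forwards constant or duplicate outputs
 * into the consumer, and nir_assign_io_var_locations() assigns driver
 * locations. Which of these run, and in what order, is a driver decision.
 */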
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
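/* Example: a non-patch mat4 varying at VARYING_SLOT_VAR2 occupies four
 * attribute slots, so the mask above is 0xf << VARYING_SLOT_VAR2. Patch
 * varyings are shifted relative to VARYING_SLOT_PATCH0 instead, so their
 * bits index the separate patch bitfields.
 */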
static unsigned
get_num_components(nir_variable *var)
{
   if (glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
      return 4;

   return glsl_get_vector_elements(glsl_without_array(var->type));
}
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            for (unsigned i = 0; i < get_num_components(var); i++) {
               if (var->data.patch) {
                  patches_read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               } else {
                  read[var->data.location_frac + i] |=
                     get_variable_io_mask(var, shader->info.stage);
               }
            }
         }
      }
   }
}
/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer,
 *                                      &producer->outputs,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
 * representing each .location_frac used. Note that for vector variables,
 * only the first channel (.location_frac) is examined for deciding if the
 * variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         } else {
            written[var->data.location_frac + i] |=
               get_variable_io_mask(var, producer->info.stage);
         }
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      for (unsigned i = 0; i < get_num_components(var); i++) {
         if (var->data.patch) {
            patches_read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         } else {
            read[var->data.location_frac + i] |=
               get_variable_io_mask(var, consumer->info.stage);
         }
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
                                        patches_read);
   progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
                                        patches_written) || progress;

   return progress;
}
static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}
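/* Example: an "int" varying always resolves to INTERP_MODE_FLAT here; GLSL
 * requires integer varyings to be flat interpolated regardless of whether a
 * qualifier was written on the variable.
 */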
#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bitsizes
    * other than 32bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}
struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};
/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(struct exec_list *var_list,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit =
               glsl_type_is_32bit(glsl_without_array(type));
         }
      }
   }
}
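/* Worked example for the dual-slot path above: a dvec4 (elements = 4,
 * dmul = 2, i.e. eight 32-bit components) at location_frac 0 marks
 * components 0xf in its first slot, computes comps_slot2 = 8 - 4 = 4,
 * and so marks (1 << 4) - 1 = 0xf in the second slot as well.
 */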
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}
static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}
static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}
struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool initialised;
};
static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together for easier packing later.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}
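/* With this comparator, qsort() produces runs of components that share an
 * interpolation type and location, with all patch components at the end;
 * each run can then be packed into shared vec4 slots.
 */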
static void
gather_varying_component_info(nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {{0}};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_variable(var, &consumer->inputs) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_vertex)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx - 1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
         }
      }
   }
}
static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}
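/* Example: if a slot already has components x and y assigned with a matching
 * interpolation type and loc, the while loop above advances tmp_comp past
 * them, so the next 32-bit component is remapped to component z of that slot.
 */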
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs. When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}
/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {{0}};

   get_unmoveable_components_masks(&producer->outputs, assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}
/**
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}
static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}
static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_variable(var, &consumer->inputs) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}
static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}
static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}
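/* Example: if the producer's last block stores the constant 1.0 to a scalar
 * output, every load of the matching consumer input is rewritten above to an
 * immediate 1.0, leaving the input variable dead for later cleanup passes.
 */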
static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}
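/* Example: if two producer outputs store the same SSA value, consumer loads
 * of the duplicate input are rewritten above as loads of the first matching
 * input, provided the interpolation type and location match.
 */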
bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}
/* TODO any better helper somewhere to sort a list? */
static void
insert_sorted(struct exec_list *var_list, nir_variable *new_var)
{
   nir_foreach_variable(var, var_list) {
      if (var->data.location > new_var->data.location) {
         exec_node_insert_node_before(&var->node, &new_var->node);
         return;
      }
   }
   exec_list_push_tail(var_list, &new_var->node);
}
static void
sort_varyings(struct exec_list *var_list)
{
   struct exec_list new_list;
   exec_list_make_empty(&new_list);
   nir_foreach_variable_safe(var, var_list) {
      exec_node_remove(&var->node);
      insert_sorted(&new_list, var);
   }
   exec_list_move_nodes_to(&new_list, var_list);
}
void
nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
                            gl_shader_stage stage)
{
   unsigned location = 0;
   unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
   uint64_t processed_locs[2] = {0};

   sort_varyings(var_list);

   int UNUSED last_loc = 0;
   bool last_partial = false;
   nir_foreach_variable(var, var_list) {
      const struct glsl_type *type = var->type;
      if (nir_is_per_vertex_io(var, stage)) {
         assert(glsl_type_is_array(type));
         type = glsl_get_array_element(type);
      }

      unsigned base;
      if (var->data.mode == nir_var_shader_in && stage == MESA_SHADER_VERTEX)
         base = VERT_ATTRIB_GENERIC0;
      else if (var->data.mode == nir_var_shader_out &&
               stage == MESA_SHADER_FRAGMENT)
         base = FRAG_RESULT_DATA0;
      else
         base = VARYING_SLOT_VAR0;

      unsigned var_size;
      if (var->data.compact) {
         /* compact variables must be arrays of scalars */
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
         unsigned start = 4 * location + var->data.location_frac;
         unsigned end = start + glsl_get_length(type);
         var_size = end / 4 - location;
         last_partial = end % 4 != 0;
      } else {
         /* Compact variables bypass the normal varying compacting pass,
          * which means they cannot be in the same vec4 slot as a normal
          * variable. If part of the current slot is taken up by a compact
          * variable, we need to go to the next one.
          */
         if (last_partial) {
            location++;
            last_partial = false;
         }
         var_size = glsl_count_attribute_slots(type, false);
      }

      /* Builtins don't allow component packing so we only need to worry about
       * user defined varyings sharing the same location.
       */
      bool processed = false;
      if (var->data.location >= base) {
         unsigned glsl_location = var->data.location - base;

         for (unsigned i = 0; i < var_size; i++) {
            if (processed_locs[var->data.index] &
                ((uint64_t)1 << (glsl_location + i)))
               processed = true;
            else
               processed_locs[var->data.index] |=
                  ((uint64_t)1 << (glsl_location + i));
         }
      }

      /* Because component packing allows varyings to share the same location
       * we may already have processed this location.
       */
      if (processed) {
         unsigned driver_location = assigned_locations[var->data.location];
         var->data.driver_location = driver_location;

         /* An array may be packed such that it crosses multiple other arrays
          * or variables, we need to make sure we have allocated the elements
          * consecutively if the previously processed var was shorter than
          * the current array we are processing.
          *
          * NOTE: The code below assumes the var list is ordered in ascending
          *       location order.
          */
         assert(last_loc <= var->data.location);
         last_loc = var->data.location;
         unsigned last_slot_location = driver_location + var_size;
         if (last_slot_location > location) {
            unsigned num_unallocated_slots = last_slot_location - location;
            unsigned first_unallocated_slot = var_size - num_unallocated_slots;
            for (unsigned i = first_unallocated_slot; i < var_size; i++) {
               assigned_locations[var->data.location + i] = location;
               location++;
            }
         }
         continue;
      }

      for (unsigned i = 0; i < var_size; i++) {
         assigned_locations[var->data.location + i] = location + i;
      }

      var->data.driver_location = location;
      location += var_size;
   }

   if (last_partial)
      location++;

   *size = location;
}
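/* Example for the compact path above: a float[5] clip-distance-style array
 * at location_frac 0 gives start = 4 * location and end = start + 5, so
 * var_size = 1 and last_partial = true; the partially used trailing slot is
 * then reserved before the next non-compact variable (or at the end of the
 * loop) so nothing else lands in it.
 */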