/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir_builder.h"
27 #include "util/hash_table.h"
/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
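/* A worked example of the mask above (illustrative, not from the original
 * source): a two-slot varying such as a float[2] at location 3 produces
 * ((1ull << 2) - 1) << 3 = 0x18, i.e. bits 3 and 4 set in the bitfield.
 */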
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read,
                     uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            if (deref->mode != nir_var_shader_out)
               continue;

            nir_variable *var = nir_deref_instr_get_variable(deref);
            if (var->data.patch) {
               patches_read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            } else {
               read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            }
         }
      }
   }
}
/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated). Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer,
 *                                      &producer->outputs,
 *                                      read, patches_read) ||
 *                                      progress;
 *
 * The "used" parameters should each be an array of 4 uint64_ts (probably of
 * VARYING_BIT_*), one mask per .location_frac used. Note that for vector
 * variables, only the first channel (.location_frac) is examined for
 * deciding if the variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      if (var->data.explicit_xfb_buffer)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_shader_temp;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
                                        patches_written) || progress;

   return progress;
}
static uint8_t
get_interp_type(nir_variable *var, const struct glsl_type *type,
                bool default_to_smooth_interp)
{
   if (glsl_type_is_integer(type))
      return INTERP_MODE_FLAT;
   else if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}
#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2
static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
static bool
is_packing_supported_for_type(const struct glsl_type *type)
{
   /* We ignore complex types such as arrays, matrices, structs and bitsizes
    * other than 32bit. All other vector types should have been split into
    * scalar variables by the lower_io_to_scalar pass. The only exception
    * should be OpenGL xfb varyings.
    * TODO: add support for more complex types?
    */
   return glsl_type_is_scalar(type) && glsl_type_is_32bit(type);
}
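/* Illustrative examples of the check above (not from the original source):
 * a lone float or int scalar is packable; a double fails the 32-bit check,
 * and a vec2 or float[4] fails the scalar check, so those keep their
 * existing locations.
 */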
struct assigned_comps
{
   uint8_t comps;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
};
/* Packing arrays and dual slot varyings is difficult so to avoid complex
 * algorithms this function just assigns them their existing location for now.
 * TODO: allow better packing of complex types.
 */
static void
get_unmoveable_components_masks(struct exec_list *var_list,
                                struct assigned_comps *comps,
                                gl_shader_stage stage,
                                bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* If we can pack this varying then don't mark the components as
          * used.
          */
         if (is_packing_supported_for_type(type))
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;

         unsigned elements =
            glsl_type_is_vector_or_scalar(glsl_without_array(type)) ?
            glsl_get_vector_elements(glsl_without_array(type)) : 4;

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i].comps |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * dmul) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i].comps |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i].comps |=
                  ((1 << (elements * dmul)) - 1) << var->data.location_frac;
            }

            comps[location + i].interp_type =
               get_interp_type(var, type, default_to_smooth_interp);
            comps[location + i].interp_loc = get_interp_loc(var);
            comps[location + i].is_32bit = glsl_type_is_32bit(type);
         }
      }
   }
}
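/* A worked example of the dual-slot path above (illustrative, not from the
 * original source): a dvec3 has elements = 3 and dmul = 2. With
 * location_frac == 0, the first slot marks num_comps = 4 components
 * (bits 0-3) and comps_slot2 = 3 * 2 - 4 = 2, so the second slot marks
 * bits 0-1.
 */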
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
mark_all_used_slots(nir_variable *var, uint64_t *slots_used,
                    uint64_t slots_used_mask, unsigned num_slots)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |= slots_used_mask &
      BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);
}
static void
mark_used_slot(nir_variable *var, uint64_t *slots_used, unsigned offset)
{
   unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;

   slots_used[var->data.patch ? 1 : 0] |=
      BITFIELD64_BIT(var->data.location - loc_offset + offset);
}
static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read,
                           uint32_t *p_slots_used, uint32_t *p_out_slots_read)
{
   uint64_t out_slots_read_tmp[2] = {0};
   uint64_t slots_used_tmp[2] = {0};

   /* We don't touch builtins so just copy the bitmask */
   slots_used_tmp[0] = *slots_used & BITFIELD64_RANGE(0, VARYING_SLOT_VAR0);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc =
            &remap[location][var->data.location_frac];

         unsigned loc_offset = var->data.patch ? VARYING_SLOT_PATCH0 : 0;
         uint64_t used = var->data.patch ? *p_slots_used : *slots_used;
         uint64_t outs_used =
            var->data.patch ? *p_out_slots_read : *out_slots_read;
         uint64_t slots =
            BITFIELD64_RANGE(var->data.location - loc_offset, num_slots);

         if (slots & used)
            used_across_stages = true;

         if (slots & outs_used)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages)
               mark_all_used_slots(var, slots_used_tmp, used, num_slots);

            if (outputs_read) {
               mark_all_used_slots(var, out_slots_read_tmp, outs_used,
                                   num_slots);
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  mark_used_slot(var, slots_used_tmp, i);

               if (outputs_read)
                  mark_used_slot(var, out_slots_read_tmp, i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp[0];
   *out_slots_read = out_slots_read_tmp[0];
   *p_slots_used = slots_used_tmp[1];
   *p_out_slots_read = out_slots_read_tmp[1];
}
struct varying_component {
   nir_variable *var;
   uint8_t interp_type;
   uint8_t interp_loc;
   bool is_32bit;
   bool is_patch;
   bool initialised;
};
static int
cmp_varying_component(const void *comp1_v, const void *comp2_v)
{
   struct varying_component *comp1 = (struct varying_component *) comp1_v;
   struct varying_component *comp2 = (struct varying_component *) comp2_v;

   /* We want patches to be ordered at the end of the array */
   if (comp1->is_patch != comp2->is_patch)
      return comp1->is_patch ? 1 : -1;

   /* We can only pack varyings with matching interpolation types so group
    * them together to make the packing easier.
    */
   if (comp1->interp_type != comp2->interp_type)
      return comp1->interp_type - comp2->interp_type;

   /* Interpolation loc must match also. */
   if (comp1->interp_loc != comp2->interp_loc)
      return comp1->interp_loc - comp2->interp_loc;

   /* If everything else matches just use the original location to sort */
   return comp1->var->data.location - comp2->var->data.location;
}
static void
gather_varying_component_info(nir_shader *consumer,
                              struct varying_component **varying_comp_info,
                              unsigned *varying_comp_info_size,
                              bool default_to_smooth_interp)
{
   unsigned store_varying_info_idx[MAX_VARYINGS_INCL_PATCH][4] = {0};
   unsigned num_of_comps_to_pack = 0;

   /* Count the number of varyings that can be packed and create a mapping
    * of those varyings to the array we will pass to qsort.
    */
   nir_foreach_variable(var, &consumer->inputs) {

      /* Only remap things that aren't builtins. */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         if (!is_packing_supported_for_type(type))
            continue;

         unsigned loc = var->data.location - VARYING_SLOT_VAR0;
         store_varying_info_idx[loc][var->data.location_frac] =
            ++num_of_comps_to_pack;
      }
   }

   *varying_comp_info_size = num_of_comps_to_pack;
   *varying_comp_info = rzalloc_array(NULL, struct varying_component,
                                      num_of_comps_to_pack);

   nir_function_impl *impl = nir_shader_get_entrypoint(consumer);

   /* Walk over the shader and populate the varying component info array */
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_centroid &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_sample &&
             intr->intrinsic != nir_intrinsic_interp_deref_at_offset)
            continue;

         nir_deref_instr *deref = nir_src_as_deref(intr->src[0]);
         if (deref->mode != nir_var_shader_in)
            continue;

         /* We only remap things that aren't builtins. */
         nir_variable *in_var = nir_deref_instr_get_variable(deref);
         if (in_var->data.location < VARYING_SLOT_VAR0)
            continue;

         unsigned location = in_var->data.location - VARYING_SLOT_VAR0;
         if (location >= MAX_VARYINGS_INCL_PATCH)
            continue;

         unsigned var_info_idx =
            store_varying_info_idx[location][in_var->data.location_frac];
         if (!var_info_idx)
            continue;

         struct varying_component *vc_info =
            &(*varying_comp_info)[var_info_idx - 1];

         if (!vc_info->initialised) {
            const struct glsl_type *type = in_var->type;
            if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
               assert(glsl_type_is_array(type));
               type = glsl_get_array_element(type);
            }

            vc_info->var = in_var;
            vc_info->interp_type =
               get_interp_type(in_var, type, default_to_smooth_interp);
            vc_info->interp_loc = get_interp_loc(in_var);
            vc_info->is_32bit = glsl_type_is_32bit(type);
            vc_info->is_patch = in_var->data.patch;
         }
      }
   }
}
static void
assign_remap_locations(struct varying_loc (*remap)[4],
                       struct assigned_comps *assigned_comps,
                       struct varying_component *info,
                       unsigned *cursor, unsigned *comp,
                       unsigned max_location)
{
   unsigned tmp_cursor = *cursor;
   unsigned tmp_comp = *comp;

   for (; tmp_cursor < max_location; tmp_cursor++) {

      if (assigned_comps[tmp_cursor].comps) {
         /* We can only pack varyings with matching interpolation types,
          * interpolation loc must match also.
          * TODO: i965 can handle interpolation locations that don't match,
          * but the radeonsi nir backend handles everything as vec4s and so
          * expects this to be the same for all components. We could make this
          * check driver specific or drop it if NIR ever becomes the only
          * radeonsi backend.
          */
         if (assigned_comps[tmp_cursor].interp_type != info->interp_type ||
             assigned_comps[tmp_cursor].interp_loc != info->interp_loc) {
            tmp_comp = 0;
            continue;
         }

         /* We can only pack varyings with matching types, and the current
          * algorithm only supports packing 32-bit.
          */
         if (!assigned_comps[tmp_cursor].is_32bit) {
            tmp_comp = 0;
            continue;
         }

         while (tmp_comp < 4 &&
                (assigned_comps[tmp_cursor].comps & (1 << tmp_comp))) {
            tmp_comp++;
         }
      }

      if (tmp_comp == 4) {
         tmp_comp = 0;
         continue;
      }

      unsigned location = info->var->data.location - VARYING_SLOT_VAR0;

      /* Once we have assigned a location mark it as used */
      assigned_comps[tmp_cursor].comps |= (1 << tmp_comp);
      assigned_comps[tmp_cursor].interp_type = info->interp_type;
      assigned_comps[tmp_cursor].interp_loc = info->interp_loc;
      assigned_comps[tmp_cursor].is_32bit = info->is_32bit;

      /* Assign remap location */
      remap[location][info->var->data.location_frac].component = tmp_comp++;
      remap[location][info->var->data.location_frac].location =
         tmp_cursor + VARYING_SLOT_VAR0;

      break;
   }

   *cursor = tmp_cursor;
   *comp = tmp_comp;
}
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer,
                   struct assigned_comps *assigned_comps,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[MAX_VARYINGS_INCL_PATCH][4] = {{{0}, {0}}};
   struct varying_component *varying_comp_info;
   unsigned varying_comp_info_size;

   /* Gather varying component info */
   gather_varying_component_info(consumer, &varying_comp_info,
                                 &varying_comp_info_size,
                                 default_to_smooth_interp);

   /* Sort varying components. */
   qsort(varying_comp_info, varying_comp_info_size,
         sizeof(struct varying_component), cmp_varying_component);

   unsigned cursor = 0;
   unsigned comp = 0;

   /* Set the remap array based on the sorted components */
   for (unsigned i = 0; i < varying_comp_info_size; i++) {
      struct varying_component *info = &varying_comp_info[i];

      assert(info->is_patch || cursor < MAX_VARYING);
      if (info->is_patch) {
         /* The list should be sorted with all non-patch inputs first followed
          * by patch inputs. When we hit our first patch input, we need to
          * reset the cursor to MAX_VARYING so we put them in the right slot.
          */
         if (cursor < MAX_VARYING) {
            cursor = MAX_VARYING;
            comp = 0;
         }

         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYINGS_INCL_PATCH);
      } else {
         assign_remap_locations(remap, assigned_comps, info,
                                &cursor, &comp, MAX_VARYING);

         /* Check if we failed to assign a remap location. This can happen if
          * for example there are a bunch of unmovable components with
          * mismatching interpolation types causing us to skip over locations
          * that would have been useful for packing later components.
          * The solution is to iterate over the locations again (this should
          * happen very rarely in practice).
          */
         if (cursor == MAX_VARYING) {
            cursor = 0;
            comp = 0;
            assign_remap_locations(remap, assigned_comps, info,
                                   &cursor, &comp, MAX_VARYING);
         }
      }
   }

   ralloc_free(varying_comp_info);

   uint64_t zero = 0;
   uint32_t zero32 = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero,
                              &consumer->info.patch_inputs_read, &zero32);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read,
                              &producer->info.patch_outputs_written,
                              &producer->info.patch_outputs_read);
}
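/* An illustrative example of compact_components (not from the original
 * source): given two packable float inputs in component 2 of
 * VARYING_SLOT_VAR1 and component 1 of VARYING_SLOT_VAR3, and assuming
 * matching interpolation and no unmoveable components in the way, both are
 * remapped to components 0 and 1 of VARYING_SLOT_VAR0, freeing the other
 * two slots entirely.
 */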
/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   struct assigned_comps assigned_comps[MAX_VARYINGS_INCL_PATCH] = {0};

   get_unmoveable_components_masks(&producer->outputs, assigned_comps,
                                   producer->info.stage,
                                   default_to_smooth_interp);
   get_unmoveable_components_masks(&consumer->inputs, assigned_comps,
                                   consumer->info.stage,
                                   default_to_smooth_interp);

   compact_components(producer, consumer, assigned_comps,
                      default_to_smooth_interp);
}
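/* A minimal usage sketch (illustrative, not from the original source).
 * Drivers typically run the removal pass first so the info bitfields are up
 * to date, clean up with DCE, then compact; passing true here assumes a
 * GL-style driver where unqualified varyings default to smooth
 * interpolation:
 *
 *    if (nir_remove_unused_varyings(producer, consumer)) {
 *       // run nir_opt_dce or similar on both shaders here
 *    }
 *    nir_compact_varyings(producer, consumer, true);
 */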
/**
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}
static bool
does_varying_match(nir_variable *out_var, nir_variable *in_var)
{
   return in_var->data.location == out_var->data.location &&
          in_var->data.location_frac == out_var->data.location_frac;
}
static nir_variable *
get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
{
   nir_foreach_variable(var, &consumer->inputs) {
      if (does_varying_match(out_var, var))
         return var;
   }

   return NULL;
}
static bool
can_replace_varying(nir_variable *out_var)
{
   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct_or_ifc(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (!glsl_type_is_scalar(out_var->type))
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   return true;
}
static bool
replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(out_var, in_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}
static bool
replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
                        nir_intrinsic_instr *dup_store_intr)
{
   assert(input_var);

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_variable *dup_out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
         if (in_deref->mode != nir_var_shader_in)
            continue;

         nir_variable *in_var = nir_deref_instr_get_variable(in_deref);

         if (!does_varying_match(dup_out_var, in_var) ||
             in_var->data.interpolation != input_var->data.interpolation ||
             get_interp_loc(in_var) != get_interp_loc(input_var))
            continue;

         b.cursor = nir_before_instr(instr);

         nir_ssa_def *load = nir_load_var(&b, input_var);
         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));

         progress = true;
      }
   }

   return progress;
}
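/* An illustrative example of what nir_link_opt_varyings below can do (not
 * from the original source): if the producer's last block contains
 *
 *    store_deref(out_var, load_const(1.0))
 *
 * then loads of the matching consumer input are rewritten to the constant
 * 1.0 via replace_constant_input. If instead two outputs store the same
 * non-constant SSA value, loads of the duplicate input are rewritten to
 * load the surviving input via replace_duplicate_input.
 */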
bool
nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
      if (out_deref->mode != nir_var_shader_out)
         continue;

      nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
      if (!can_replace_varying(out_var))
         continue;

      if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
         progress |= replace_constant_input(consumer, intr);
      } else {
         struct hash_entry *entry =
            _mesa_hash_table_search(varying_values, intr->src[1].ssa);
         if (entry) {
            progress |= replace_duplicate_input(consumer,
                                                (nir_variable *) entry->data,
                                                intr);
         } else {
            nir_variable *in_var = get_matching_input_var(consumer, out_var);
            if (in_var) {
               _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
                                       in_var);
            }
         }
      }
   }

   _mesa_hash_table_destroy(varying_values, NULL);

   return progress;
}