/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "nir_builder.h"
#include "util/hash_table.h"
/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
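
/* A worked example (illustrative, not part of the pass): a non-patch vec4
 * output at VARYING_SLOT_VAR1 occupies one attribute slot, so slots == 1 and
 * the function returns 1ull << VARYING_SLOT_VAR1. A float[3] input at
 * VARYING_SLOT_VAR2 occupies three slots, giving 0x7ull << VARYING_SLOT_VAR2.
 * Patch varyings are rebased against VARYING_SLOT_PATCH0 first, so their
 * masks index a separate patch bitfield.
 */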
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_variable *var =
               nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

            if (var->data.mode != nir_var_shader_out)
               continue;

            if (var->data.patch) {
               patches_read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            } else {
               read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            }
         }
      }
   }
}
/**
 * Helper for removing unused shader I/O variables, by demoting them to global
 * variables (which may then be dead code eliminated).
 *
 * Example usage is:
 *
 * progress = nir_remove_unused_io_vars(producer,
 *                                      &producer->outputs,
 *                                      read, patches_read) ||
 *            progress;
 *
 * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
 * representing each .location_frac used. Note that for vector variables,
 * only the first channel (.location_frac) is examined for deciding if the
 * variable is used!
 */
bool
nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                          uint64_t *used_by_other_stage,
                          uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_global;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   if (progress)
      nir_fixup_deref_modes(shader);

   return progress;
}
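
/* Sketch of building a "used" mask for the helper above (mirrors what
 * nir_remove_unused_varyings() does below; the values are hypothetical):
 *
 *    uint64_t read[4] = { 0 };
 *    read[2] |= VARYING_BIT_VAR(3);   // scalar at VAR3, location_frac == 2
 *
 * i.e. index by .location_frac, with each entry holding a slot bitmask.
 */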
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);
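
   /* Illustrative GLSL of the TCS case handled above: an output read back
    * within the same patch keeps it live even if the TES ignores it.
    *
    *    out float tc_data[];                  // per-vertex TCS output
    *    tc_data[gl_InvocationID] = compute(); // compute() is hypothetical
    *    barrier();
    *    float neighbor = tc_data[(gl_InvocationID + 1) % 3];
    */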
   bool progress = false;
   progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
                                        patches_read);

   progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
                                        patches_written) || progress;

   return progress;
}
static uint8_t
get_interp_type(nir_variable *var, bool default_to_smooth_interp)
{
   if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}
#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2
static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
static void
get_slot_component_masks_and_interp_types(struct exec_list *var_list,
                                          uint8_t *comps,
                                          uint8_t *interp_type,
                                          uint8_t *interp_loc,
                                          gl_shader_stage stage,
                                          bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         unsigned elements =
            glsl_get_vector_elements(glsl_without_array(type));

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            interp_type[location + i] =
               get_interp_type(var, default_to_smooth_interp);
            interp_loc[location + i] = get_interp_loc(var);

            if (dual_slot) {
               if (i & 1) {
                  comps[location + i] |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * 2) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i] |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i] |=
                  ((1 << elements) - 1) << var->data.location_frac;
            }
         }
      }
   }
}
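
/* Illustrative example of the masks built above: a smooth float input at
 * VARYING_SLOT_VAR1 with location_frac == 2 yields comps[1] |= 0x4,
 * interp_type[1] == INTERP_MODE_SMOOTH and interp_loc[1] ==
 * INTERPOLATE_LOC_CENTER. A dvec3 (dual slot) starting at component 0
 * fills its first slot with 0xf and its second slot with 0x3.
 */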
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read)
{
   uint64_t out_slots_read_tmp = 0;

   /* We don't touch builtins so just copy the bitmask */
   uint64_t slots_used_tmp =
      *slots_used & (((uint64_t)1 << (VARYING_SLOT_VAR0 - 1)) - 1);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {
         assert(var->data.location - VARYING_SLOT_VAR0 < 32);

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         uint64_t slots = (((uint64_t)1 << num_slots) - 1) << var->data.location;
         if (slots & *slots_used)
            used_across_stages = true;

         if (slots & *out_slots_read)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages) {
               slots_used_tmp |=
                  *slots_used & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

            if (outputs_read) {
               out_slots_read_tmp |=
                  *out_slots_read & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  slots_used_tmp |= (uint64_t)1 << (var->data.location + i);

               if (outputs_read)
                  out_slots_read_tmp |= (uint64_t)1 << (var->data.location + i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp;
   *out_slots_read = out_slots_read_tmp;
}
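
/* Shape of the remap table consumed above (illustrative): remap[slot][frac]
 * describes where the scalar currently at VARYING_SLOT_VAR0 + slot,
 * component frac, should move. A zero .location means "leave it alone";
 * compact_components() below fills in the non-zero entries.
 */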
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
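/* Illustrative example (hypothetical slots): a lone scalar input at VAR0.w
 * (comps[0] == 0x8) gets remapped to VAR0.x, and a scalar at VAR1.y whose
 * slot is otherwise empty can then be packed into VAR0.y, provided the
 * interpolation type and location of both slots match.
 */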
static void
compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps,
                   uint8_t *interp_type, uint8_t *interp_loc,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[32][4] = {{{0}, {0}}};

   /* Create a cursor for each interpolation type */
   unsigned cursor[4] = {0};

   /* We only need to pass over one stage and we choose the consumer as it seems
    * to cause a larger reduction in instruction counts (tested on i965).
    */
   nir_foreach_variable(var, input_list) {

      /* Only remap things that aren't builtins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* Skip types that require more complex packing handling.
          * TODO: add support for these types.
          */
         if (glsl_type_is_array(type) ||
             glsl_type_is_dual_slot(type) ||
             glsl_type_is_matrix(type) ||
             glsl_type_is_struct(type) ||
             glsl_type_is_64bit(type))
            continue;

         /* We ignore complex types above and all other vector types should
          * have been split into scalar variables by the lower_io_to_scalar
          * pass. The only exception should be OpenGL xfb varyings.
          */
         if (glsl_get_vector_elements(type) != 1)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         uint8_t used_comps = comps[location];

         /* If there are no empty components there is nothing more for us to do.
          */
         if (used_comps == 0xf)
            continue;

         bool found_new_offset = false;
         uint8_t interp = get_interp_type(var, default_to_smooth_interp);
         for (; cursor[interp] < 32; cursor[interp]++) {
            uint8_t cursor_used_comps = comps[cursor[interp]];

            /* We couldn't find anywhere to pack the varying; move on to the
             * next one.
             */
            if (cursor[interp] == location &&
                (var->data.location_frac == 0 ||
                 cursor_used_comps & ((1 << (var->data.location_frac)) - 1)))
               break;

            /* We can only pack varyings with matching interpolation types */
            if (interp_type[cursor[interp]] != interp)
               continue;

            /* Interpolation loc must match also.
             * TODO: i965 can handle these if they don't match, but the
             * radeonsi nir backend handles everything as vec4s and so expects
             * this to be the same for all components. We could make this
             * check driver specific or drop it if NIR ever becomes the only
             * radeonsi backend.
             */
            if (interp_loc[cursor[interp]] != get_interp_loc(var))
               continue;

            /* If the slot is empty just skip it for now, compact_var_list()
             * can be called after this function to remove empty slots for us.
             * TODO: finish implementing compact_var_list(); it requires array
             * and matrix splitting.
             */
            if (!cursor_used_comps)
               continue;

            uint8_t unused_comps = ~cursor_used_comps;

            for (unsigned i = 0; i < 4; i++) {
               uint8_t new_var_comps = 1 << i;
               if (unused_comps & new_var_comps) {
                  remap[location][var->data.location_frac].component = i;
                  remap[location][var->data.location_frac].location =
                     cursor[interp] + VARYING_SLOT_VAR0;

                  found_new_offset = true;

                  /* Turn off the mask for the component we are remapping */
                  if (comps[location] & 1 << var->data.location_frac) {
                     comps[location] ^= 1 << var->data.location_frac;
                     comps[cursor[interp]] |= new_var_comps;
                  }
                  break;
               }
            }

            if (found_new_offset)
               break;
         }
      }
   }

   uint64_t zero = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read);
}
/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
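/* Concrete case of the asymmetry described above (illustrative GLSL): the
 * producer writes "out float v[4];" through a dynamic index, so its side
 * keeps all four slots, while the consumer only reads v[2] after
 * scalarisation. OR-ing inputs_read with outputs_written keeps all four
 * slots valid.
 */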
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint8_t comps[32] = {0};
   uint8_t interp_type[32] = {0};
   uint8_t interp_loc[32] = {0};

   get_slot_component_masks_and_interp_types(&producer->outputs, comps,
                                             interp_type, interp_loc,
                                             producer->info.stage,
                                             default_to_smooth_interp);
   get_slot_component_masks_and_interp_types(&consumer->inputs, comps,
                                             interp_type, interp_loc,
                                             consumer->info.stage,
                                             default_to_smooth_interp);

   compact_components(producer, consumer, comps, interp_type, interp_loc,
                      default_to_smooth_interp);
}
/*
 * Mark XFB varyings as always_active_io in the consumer so the linking opts
 * don't touch them.
 */
void
nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
{
   nir_variable *input_vars[MAX_VARYING] = { 0 };

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         input_vars[location] = var;
      }
   }

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {

         if (!var->data.always_active_io)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         if (input_vars[location]) {
            input_vars[location]->data.always_active_io = true;
         }
      }
   }
}
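
/* Typical call order in a driver's link step (a sketch; the exact pass order
 * is the driver's choice): run nir_link_xfb_varyings(producer, consumer)
 * first, so xfb varyings are flagged always_active_io before
 * nir_remove_unused_varyings() and nir_compact_varyings() run and would
 * otherwise demote or repack them.
 */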
static bool
try_replace_constant_input(nir_shader *shader,
                           nir_intrinsic_instr *store_intr)
{
   nir_variable *out_var =
      nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));

   if (out_var->data.mode != nir_var_shader_out)
      return false;

   /* Skip types that require more complex handling.
    * TODO: add support for these types.
    */
   if (glsl_type_is_array(out_var->type) ||
       glsl_type_is_dual_slot(out_var->type) ||
       glsl_type_is_matrix(out_var->type) ||
       glsl_type_is_struct(out_var->type))
      return false;

   /* Limit this pass to scalars for now to keep things simple. Most varyings
    * should have been lowered to scalars at this point anyway.
    */
   if (store_intr->num_components != 1)
      return false;

   if (out_var->data.location < VARYING_SLOT_VAR0 ||
       out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
      return false;

   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
         if (intr->intrinsic != nir_intrinsic_load_deref)
            continue;

         nir_variable *in_var =
            nir_deref_instr_get_variable(nir_src_as_deref(intr->src[0]));

         if (in_var->data.mode != nir_var_shader_in)
            continue;

         if (in_var->data.location != out_var->data.location ||
             in_var->data.location_frac != out_var->data.location_frac)
            continue;

         b.cursor = nir_before_instr(instr);

         nir_load_const_instr *out_const =
            nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);

         /* Add new const to replace the input */
         nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
                                             intr->dest.ssa.bit_size,
                                             out_const->value);

         nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));

         progress = true;
      }
   }

   return progress;
}
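
/* Effect of the pass above (illustrative GLSL): if the vertex shader ends
 * with "v_fog = 0.0;" for "out float v_fog;", every load of v_fog in the
 * fragment shader is rewritten to the constant 0.0, so later linking and
 * dead-code passes can drop the now-unused varying.
 */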
bool
nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer)
{
   /* TODO: Add support for more shader stage combinations */
   if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
       (producer->info.stage != MESA_SHADER_VERTEX &&
        producer->info.stage != MESA_SHADER_TESS_EVAL))
      return false;

   bool progress = false;

   nir_function_impl *impl = nir_shader_get_entrypoint(producer);

   /* If we find a store in the last block of the producer we can be sure this
    * is the only possible value for this output.
    */
   nir_block *last_block = nir_impl_last_block(impl);
   nir_foreach_instr_reverse(instr, last_block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      if (intr->intrinsic != nir_intrinsic_store_deref)
         continue;

      if (intr->src[1].ssa->parent_instr->type != nir_instr_type_load_const)
         continue;

      progress |= try_replace_constant_input(consumer, intr);
   }

   return progress;
}