/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "util/hash_table.h"
/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}
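
/* Worked example (illustrative only): a non-patch vertex-shader output of
 * type vec4[2] at var->data.location == VARYING_SLOT_VAR1 counts as two
 * attribute slots, so the mask is ((1ull << 2) - 1) << VARYING_SLOT_VAR1,
 * i.e. the VAR1 and VAR2 bits of outputs_written. Patch varyings are rebased
 * to VARYING_SLOT_PATCH0 first because they index the separate
 * patch_inputs_read / patch_outputs_written bitfields instead.
 */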
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_variable *var =
               nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

            if (var->data.mode != nir_var_shader_out)
               continue;

            if (var->data.patch) {
               patches_read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            } else {
               read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            }
         }
      }
   }
}
static bool
remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                      uint64_t *used_by_other_stage,
                      uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is invalid, make it a global variable instead */
         var->data.location = 0;
         var->data.mode = nir_var_global;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   return progress;
}
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = remove_unused_io_vars(producer, &producer->outputs, read,
                                    patches_read);

   progress = remove_unused_io_vars(consumer, &consumer->inputs, written,
                                    patches_written) || progress;

   return progress;
}
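
/* Usage sketch (an assumed caller-side sequence, not part of this file):
 * this pass only demotes dead varyings to globals, it does not delete their
 * loads and stores. A driver would typically follow up with something like
 *
 *    if (nir_remove_unused_varyings(producer, consumer)) {
 *       nir_lower_global_vars_to_local(producer);
 *       nir_lower_global_vars_to_local(consumer);
 *       nir_opt_dce(producer);
 *       nir_opt_dce(consumer);
 *    }
 *
 * so the demoted variables become temporaries that the usual optimisation
 * loop can remove. The exact cleanup passes vary per driver.
 */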
static uint8_t
get_interp_type(nir_variable *var, bool default_to_smooth_interp)
{
   if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}
#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2
static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}
static void
get_slot_component_masks_and_interp_types(struct exec_list *var_list,
                                          uint8_t *comps,
                                          uint8_t *interp_type,
                                          uint8_t *interp_loc,
                                          gl_shader_stage stage,
                                          bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         unsigned elements =
            glsl_get_vector_elements(glsl_without_array(type));

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            interp_type[location + i] =
               get_interp_type(var, default_to_smooth_interp);
            interp_loc[location + i] = get_interp_loc(var);

            if (dual_slot) {
               if (i & 1) {
                  comps[location + i] |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * 2) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i] |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i] |=
                  ((1 << elements) - 1) << var->data.location_frac;
            }
         }
      }
   }
}
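
/* Worked example of the masks built above (illustrative): a vec2 input at
 * VARYING_SLOT_VAR2 with location_frac == 2 sets comps[2] |= 0b1100, while
 * a dvec3 at VARYING_SLOT_VAR0 is dual slot: its first slot takes
 * num_comps = 4 components (comps[0] |= 0xf) and comps_slot2 = 3 * 2 - 4 = 2,
 * so its second slot takes the low two components (comps[1] |= 0b0011),
 * matching ARB_enhanced_layouts packing for doubles.
 */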
struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read)
{
   uint64_t out_slots_read_tmp = 0;

   /* We don't touch builtins so just copy the bitmask */
   uint64_t slots_used_tmp =
      *slots_used & (((uint64_t)1 << (VARYING_SLOT_VAR0 - 1)) - 1);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {
         assert(var->data.location - VARYING_SLOT_VAR0 < 32);

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         uint64_t slots = (((uint64_t)1 << num_slots) - 1) << var->data.location;
         if (slots & *slots_used)
            used_across_stages = true;

         if (slots & *out_slots_read)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link time optimisations (specifically array
             * splitting) to these so we need to copy the existing mask
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages) {
               slots_used_tmp |=
                  *slots_used & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

            if (outputs_read) {
               out_slots_read_tmp |=
                  *out_slots_read & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }
         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  slots_used_tmp |= (uint64_t)1 << (var->data.location + i);

               if (outputs_read)
                  out_slots_read_tmp |= (uint64_t)1 << (var->data.location + i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp;
   *out_slots_read = out_slots_read_tmp;
}
/* If there are empty components in the slot compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps,
                   uint8_t *interp_type, uint8_t *interp_loc,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[32][4] = {{{0}, {0}}};

   /* Create a cursor for each interpolation type */
   unsigned cursor[4] = {0};

   /* We only need to pass over one stage and we choose the consumer as it seems
    * to cause a larger reduction in instruction counts (tested on i965).
    */
   nir_foreach_variable(var, input_list) {

      /* Only remap things that aren't builtins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* Skip types that require more complex packing handling.
          * TODO: add support for these types.
          */
         if (glsl_type_is_array(type) ||
             glsl_type_is_dual_slot(type) ||
             glsl_type_is_matrix(type) ||
             glsl_type_is_struct(type) ||
             glsl_type_is_64bit(type))
            continue;

         /* We ignore complex types above and all other vector types should
          * have been split into scalar variables by the lower_io_to_scalar
          * pass. The only exception should be OpenGL xfb varyings.
          */
         if (glsl_get_vector_elements(type) != 1)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         uint8_t used_comps = comps[location];

         /* If there are no empty components there is nothing more for us to
          * do.
          */
         if (used_comps == 0xf)
            continue;

         bool found_new_offset = false;
         uint8_t interp = get_interp_type(var, default_to_smooth_interp);
         for (; cursor[interp] < 32; cursor[interp]++) {
            uint8_t cursor_used_comps = comps[cursor[interp]];

            /* We couldn't find anywhere to pack the varying, continue on. */
            if (cursor[interp] == location &&
                (var->data.location_frac == 0 ||
                 cursor_used_comps & ((1 << (var->data.location_frac)) - 1)))
               break;

            /* We can only pack varyings with matching interpolation types */
            if (interp_type[cursor[interp]] != interp)
               continue;

            /* Interpolation loc must match also.
             * TODO: i965 can handle these if they don't match, but the
             * radeonsi nir backend handles everything as vec4s and so expects
             * this to be the same for all components. We could make this
             * check driver specific or drop it if NIR ever becomes the only
             * radeonsi backend.
             */
            if (interp_loc[cursor[interp]] != get_interp_loc(var))
               continue;

            /* If the slot is empty just skip it for now, compact_var_list()
             * can be called after this function to remove empty slots for us.
             * TODO: finish implementing compact_var_list() requires array and
             * matrix splitting.
             */
            if (!cursor_used_comps)
               continue;

            uint8_t unused_comps = ~cursor_used_comps;

            for (unsigned i = 0; i < 4; i++) {
               uint8_t new_var_comps = 1 << i;
               if (unused_comps & new_var_comps) {
                  remap[location][var->data.location_frac].component = i;
                  remap[location][var->data.location_frac].location =
                     cursor[interp] + VARYING_SLOT_VAR0;

                  found_new_offset = true;

                  /* Turn off the mask for the component we are remapping */
                  if (comps[location] & 1 << var->data.location_frac) {
                     comps[location] ^= 1 << var->data.location_frac;
                     comps[cursor[interp]] |= new_var_comps;
                  }

                  break;
               }
            }

            if (found_new_offset)
               break;
         }
      }
   }

   uint64_t zero = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read);
}
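
/* Illustrative example of the compaction above: given two scalar varyings,
 * one at VARYING_SLOT_VAR0 component x and one at VARYING_SLOT_VAR1
 * component x, with matching interpolation type and location, the second
 * one gets remap[1][0] = { .component = 1, .location = VARYING_SLOT_VAR0 },
 * i.e. it is packed into VAR0.y, after which comps[0] == 0x3 and
 * comps[1] == 0. remap_slots_and_components() then rewrites both the
 * consumer inputs and the producer outputs to the new slot and component.
 */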
/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint8_t comps[32] = {0};
   uint8_t interp_type[32] = {0};
   uint8_t interp_loc[32] = {0};

   get_slot_component_masks_and_interp_types(&producer->outputs, comps,
                                             interp_type, interp_loc,
                                             producer->info.stage,
                                             default_to_smooth_interp);
   get_slot_component_masks_and_interp_types(&consumer->inputs, comps,
                                             interp_type, interp_loc,
                                             consumer->info.stage,
                                             default_to_smooth_interp);

   compact_components(producer, consumer, comps, interp_type, interp_loc,
                      default_to_smooth_interp);
}
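
/* Usage sketch (illustrative; the call site is driver specific and not part
 * of this file): per the comment above, callers are expected to pair the two
 * public entry points, e.g.
 *
 *    nir_remove_unused_varyings(producer, consumer);
 *    nir_compact_varyings(producer, consumer, default_to_smooth_interp);
 *
 * with default_to_smooth_interp chosen by the driver, typically true for
 * GL-style APIs where unqualified varyings interpolate smoothly by default.
 */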