nir: Remove deref chain support from remove_unused_varyings
[mesa.git] / src/compiler/nir/nir_linking_helpers.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "util/set.h"
#include "util/hash_table.h"

/* This file contains various little helpers for doing simple linking in
 * NIR. Eventually, we'll probably want a full-blown varying packing
 * implementation in here. Right now, it just deletes unused things.
 */

/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static uint64_t
get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
{
   if (var->data.location < 0)
      return 0;

   unsigned location = var->data.patch ?
      var->data.location - VARYING_SLOT_PATCH0 : var->data.location;

   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *type = var->type;
   if (nir_is_per_vertex_io(var, stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   unsigned slots = glsl_count_attribute_slots(type, false);
   return ((1ull << slots) - 1) << location;
}

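/* Add the output slots (and per-patch output slots) that the shader reads
 * back via load_deref to the given masks. This is used for the TCS, whose
 * outputs must survive even if the TES never reads them.
 */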
static void
tcs_add_output_reads(nir_shader *shader, uint64_t *read, uint64_t *patches_read)
{
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (intrin->intrinsic != nir_intrinsic_load_deref)
               continue;

            nir_variable *var =
               nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));

            if (var->data.mode != nir_var_shader_out)
               continue;

            if (var->data.patch) {
               patches_read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            } else {
               read[var->data.location_frac] |=
                  get_variable_io_mask(var, shader->info.stage);
            }
         }
      }
   }
}

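/* Demote any user-defined varying in var_list that the other stage does not
 * use (according to the used_by_other_stage masks) to an ordinary global, so
 * that later passes can eliminate it.
 */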
static bool
remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                      uint64_t *used_by_other_stage,
                      uint64_t *used_by_other_stage_patches)
{
   bool progress = false;
   uint64_t *used;

   nir_foreach_variable_safe(var, var_list) {
      if (var->data.patch)
         used = used_by_other_stage_patches;
      else
         used = used_by_other_stage;

      if (var->data.location < VARYING_SLOT_VAR0 && var->data.location >= 0)
         continue;

      if (var->data.always_active_io)
         continue;

      uint64_t other_stage = used[var->data.location_frac];

      if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
         /* This one is not used by the other stage; demote it to an
          * ordinary global instead.
          */
         var->data.location = 0;
         var->data.mode = nir_var_global;

         exec_node_remove(&var->node);
         exec_list_push_tail(&shader->globals, &var->node);

         progress = true;
      }
   }

   return progress;
}

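/* Demote varyings on the producer/consumer interface that are not used by the
 * opposite stage (and, for a TCS producer, not read back by the TCS itself)
 * to globals. Returns true if either shader was modified.
 */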
bool
nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);
   nir_assert_unlowered_derefs(producer, nir_lower_load_store_derefs);
   nir_assert_unlowered_derefs(consumer, nir_lower_load_store_derefs);

   uint64_t read[4] = { 0 }, written[4] = { 0 };
   uint64_t patches_read[4] = { 0 }, patches_written[4] = { 0 };

   nir_foreach_variable(var, &producer->outputs) {
      if (var->data.patch) {
         patches_written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      } else {
         written[var->data.location_frac] |=
            get_variable_io_mask(var, producer->info.stage);
      }
   }

   nir_foreach_variable(var, &consumer->inputs) {
      if (var->data.patch) {
         patches_read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      } else {
         read[var->data.location_frac] |=
            get_variable_io_mask(var, consumer->info.stage);
      }
   }

   /* Each TCS invocation can read data written by other TCS invocations,
    * so even if the outputs are not used by the TES we must also make
    * sure they are not read by the TCS before demoting them to globals.
    */
   if (producer->info.stage == MESA_SHADER_TESS_CTRL)
      tcs_add_output_reads(producer, read, patches_read);

   bool progress = false;
   progress = remove_unused_io_vars(producer, &producer->outputs, read,
                                    patches_read);

   progress = remove_unused_io_vars(consumer, &consumer->inputs, written,
                                    patches_written) || progress;

   return progress;
}

static uint8_t
get_interp_type(nir_variable *var, bool default_to_smooth_interp)
{
   if (var->data.interpolation != INTERP_MODE_NONE)
      return var->data.interpolation;
   else if (default_to_smooth_interp)
      return INTERP_MODE_SMOOTH;
   else
      return INTERP_MODE_NONE;
}

#define INTERPOLATE_LOC_SAMPLE 0
#define INTERPOLATE_LOC_CENTROID 1
#define INTERPOLATE_LOC_CENTER 2

static uint8_t
get_interp_loc(nir_variable *var)
{
   if (var->data.sample)
      return INTERPOLATE_LOC_SAMPLE;
   else if (var->data.centroid)
      return INTERPOLATE_LOC_CENTROID;
   else
      return INTERPOLATE_LOC_CENTER;
}

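/* Record, for each user-defined varying slot (VARYING_SLOT_VAR0 + i) used by
 * the variables in var_list, which components are occupied and which
 * interpolation type and location apply to that slot.
 */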
static void
get_slot_component_masks_and_interp_types(struct exec_list *var_list,
                                          uint8_t *comps,
                                          uint8_t *interp_type,
                                          uint8_t *interp_loc,
                                          gl_shader_stage stage,
                                          bool default_to_smooth_interp)
{
   nir_foreach_variable_safe(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         unsigned elements =
            glsl_get_vector_elements(glsl_without_array(type));

         bool dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
         unsigned slots = glsl_count_attribute_slots(type, false);
         unsigned comps_slot2 = 0;
         for (unsigned i = 0; i < slots; i++) {
            interp_type[location + i] =
               get_interp_type(var, default_to_smooth_interp);
            interp_loc[location + i] = get_interp_loc(var);

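            /* Dual-slot (64-bit vec3/vec4) values straddle two consecutive
             * slots: the first slot is filled from location_frac upwards and
             * the remaining components spill into the start of the second
             * slot.
             */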
            if (dual_slot) {
               if (i & 1) {
                  comps[location + i] |= ((1 << comps_slot2) - 1);
               } else {
                  unsigned num_comps = 4 - var->data.location_frac;
                  comps_slot2 = (elements * 2) - num_comps;

                  /* Assume ARB_enhanced_layouts packing rules for doubles */
                  assert(var->data.location_frac == 0 ||
                         var->data.location_frac == 2);
                  assert(comps_slot2 <= 4);

                  comps[location + i] |=
                     ((1 << num_comps) - 1) << var->data.location_frac;
               }
            } else {
               comps[location + i] |=
                  ((1 << elements) - 1) << var->data.location_frac;
            }
         }
      }
   }
}

struct varying_loc
{
   uint8_t component;
   uint32_t location;
};

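/* Move the variables in var_list to the slot/component given by the remap
 * table and rebuild the slots_used and out_slots_read bitmasks so they match
 * the new locations.
 */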
static void
remap_slots_and_components(struct exec_list *var_list, gl_shader_stage stage,
                           struct varying_loc (*remap)[4],
                           uint64_t *slots_used, uint64_t *out_slots_read)
{
   uint64_t out_slots_read_tmp = 0;

   /* We don't touch builtins so just copy the bitmask */
   uint64_t slots_used_tmp =
      *slots_used & (((uint64_t)1 << (VARYING_SLOT_VAR0 - 1)) - 1);

   nir_foreach_variable(var, var_list) {
      assert(var->data.location >= 0);

      /* Only remap things that aren't built-ins */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {
         assert(var->data.location - VARYING_SLOT_VAR0 < 32);

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         unsigned num_slots = glsl_count_attribute_slots(type, false);
         bool used_across_stages = false;
         bool outputs_read = false;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         struct varying_loc *new_loc = &remap[location][var->data.location_frac];

         uint64_t slots = (((uint64_t)1 << num_slots) - 1) << var->data.location;
         if (slots & *slots_used)
            used_across_stages = true;

         if (slots & *out_slots_read)
            outputs_read = true;

         if (new_loc->location) {
            var->data.location = new_loc->location;
            var->data.location_frac = new_loc->component;
         }

         if (var->data.always_active_io) {
            /* We can't apply link-time optimisations (specifically array
             * splitting) to these, so we need to copy the existing mask;
             * otherwise we will mess up the mask for things like partially
             * marked arrays.
             */
            if (used_across_stages) {
               slots_used_tmp |=
                  *slots_used & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

            if (outputs_read) {
               out_slots_read_tmp |=
                  *out_slots_read & (((uint64_t)1 << num_slots) - 1) << var->data.location;
            }

         } else {
            for (unsigned i = 0; i < num_slots; i++) {
               if (used_across_stages)
                  slots_used_tmp |= (uint64_t)1 << (var->data.location + i);

               if (outputs_read)
                  out_slots_read_tmp |= (uint64_t)1 << (var->data.location + i);
            }
         }
      }
   }

   *slots_used = slots_used_tmp;
   *out_slots_read = out_slots_read_tmp;
}

/* If there are empty components in the slot, compact the remaining components
 * as close to component 0 as possible. This will make it easier to fill the
 * empty components with components from a different slot in a following pass.
 */
static void
compact_components(nir_shader *producer, nir_shader *consumer, uint8_t *comps,
                   uint8_t *interp_type, uint8_t *interp_loc,
                   bool default_to_smooth_interp)
{
   struct exec_list *input_list = &consumer->inputs;
   struct exec_list *output_list = &producer->outputs;
   struct varying_loc remap[32][4] = {{{0}, {0}}};

   /* Create a cursor for each interpolation type */
   unsigned cursor[4] = {0};

   /* We only need to pass over one stage and we choose the consumer as it seems
    * to cause a larger reduction in instruction counts (tested on i965).
    */
   nir_foreach_variable(var, input_list) {

      /* Only remap things that aren't builtins.
       * TODO: add TES patch support.
       */
      if (var->data.location >= VARYING_SLOT_VAR0 &&
          var->data.location - VARYING_SLOT_VAR0 < 32) {

         /* We can't repack xfb varyings. */
         if (var->data.always_active_io)
            continue;

         const struct glsl_type *type = var->type;
         if (nir_is_per_vertex_io(var, consumer->info.stage)) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }

         /* Skip types that require more complex packing handling.
          * TODO: add support for these types.
          */
         if (glsl_type_is_array(type) ||
             glsl_type_is_dual_slot(type) ||
             glsl_type_is_matrix(type) ||
             glsl_type_is_struct(type) ||
             glsl_type_is_64bit(type))
            continue;

         /* We ignore complex types above and all other vector types should
          * have been split into scalar variables by the lower_io_to_scalar
          * pass. The only exception should be OpenGL xfb varyings.
          */
         if (glsl_get_vector_elements(type) != 1)
            continue;

         unsigned location = var->data.location - VARYING_SLOT_VAR0;
         uint8_t used_comps = comps[location];

         /* If there are no empty components, there is nothing more for us
          * to do.
          */
         if (used_comps == 0xf)
            continue;

         bool found_new_offset = false;
         uint8_t interp = get_interp_type(var, default_to_smooth_interp);
         for (; cursor[interp] < 32; cursor[interp]++) {
            uint8_t cursor_used_comps = comps[cursor[interp]];

            /* We couldn't find anywhere to pack the varying; continue on. */
            if (cursor[interp] == location &&
                (var->data.location_frac == 0 ||
                 cursor_used_comps & ((1 << (var->data.location_frac)) - 1)))
               break;

            /* We can only pack varyings with matching interpolation types */
            if (interp_type[cursor[interp]] != interp)
               continue;

            /* Interpolation loc must also match.
             * TODO: i965 can handle these if they don't match, but the
             * radeonsi nir backend handles everything as vec4s and so expects
             * this to be the same for all components. We could make this
             * check driver specific or drop it if NIR ever becomes the only
             * radeonsi backend.
             */
            if (interp_loc[cursor[interp]] != get_interp_loc(var))
               continue;

            /* If the slot is empty just skip it for now; compact_var_list()
             * can be called after this function to remove empty slots for us.
             * TODO: finishing compact_var_list() requires array and matrix
             * splitting.
             */
            if (!cursor_used_comps)
               continue;

            uint8_t unused_comps = ~cursor_used_comps;

            for (unsigned i = 0; i < 4; i++) {
               uint8_t new_var_comps = 1 << i;
               if (unused_comps & new_var_comps) {
                  remap[location][var->data.location_frac].component = i;
                  remap[location][var->data.location_frac].location =
                     cursor[interp] + VARYING_SLOT_VAR0;

                  found_new_offset = true;

                  /* Turn off the mask for the component we are remapping */
                  if (comps[location] & 1 << var->data.location_frac) {
                     comps[location] ^= 1 << var->data.location_frac;
                     comps[cursor[interp]] |= new_var_comps;
                  }
                  break;
               }
            }

            if (found_new_offset)
               break;
         }
      }
   }

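   /* Apply the remap to both sides of the interface and update the I/O
    * bitmasks. Inputs have no equivalent of outputs_read, so a dummy zero
    * mask is passed for the consumer.
    */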
   uint64_t zero = 0;
   remap_slots_and_components(input_list, consumer->info.stage, remap,
                              &consumer->info.inputs_read, &zero);
   remap_slots_and_components(output_list, producer->info.stage, remap,
                              &producer->info.outputs_written,
                              &producer->info.outputs_read);
}

/* We assume that this has been called more-or-less directly after
 * remove_unused_varyings. At this point, all of the varyings that we
 * aren't going to be using have been completely removed and the
 * inputs_read and outputs_written fields in nir_shader_info reflect
 * this. Therefore, the total set of valid slots is the OR of the two
 * sets of varyings; this accounts for varyings which one side may need
 * to read/write even if the other doesn't. This can happen if, for
 * instance, an array is used indirectly from one side causing it to be
 * unsplittable but directly from the other.
 */
void
nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                     bool default_to_smooth_interp)
{
   assert(producer->info.stage != MESA_SHADER_FRAGMENT);
   assert(consumer->info.stage != MESA_SHADER_VERTEX);

   uint8_t comps[32] = {0};
   uint8_t interp_type[32] = {0};
   uint8_t interp_loc[32] = {0};

   get_slot_component_masks_and_interp_types(&producer->outputs, comps,
                                             interp_type, interp_loc,
                                             producer->info.stage,
                                             default_to_smooth_interp);
   get_slot_component_masks_and_interp_types(&consumer->inputs, comps,
                                             interp_type, interp_loc,
                                             consumer->info.stage,
                                             default_to_smooth_interp);

   compact_components(producer, consumer, comps, interp_type, interp_loc,
                      default_to_smooth_interp);
}