nir: save IO semantics in lowered IO intrinsics
src/compiler/nir/nir_gather_info.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "main/menums.h"

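/**
 * Determine two properties of an input/output deref: whether it is a
 * cross-invocation access (the per-vertex index is anything other than
 * load_invocation_id) and whether it uses a non-constant (indirect) array
 * index anywhere in the path.  Compact variables are skipped for the
 * indirect check since indirect derefs on them are always lowered.
 */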
static void
get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
               bool *cross_invocation, bool *indirect)
{
   *cross_invocation = false;
   *indirect = false;

   const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* Vertex index is the outermost array index. */
   if (per_vertex) {
      assert((*p)->deref_type == nir_deref_type_array);
      nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
      *cross_invocation =
         vertex_index_instr->type != nir_instr_type_intrinsic ||
         nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
            nir_intrinsic_load_invocation_id;
      p++;
   }

   /* We always lower indirect dereferences for "compact" array vars. */
   if (!path.path[0]->var->data.compact) {
      /* Non-compact array vars: find out if they are indirect. */
      for (; *p; p++) {
         if ((*p)->deref_type == nir_deref_type_array) {
            *indirect |= !nir_src_is_const((*p)->arr.index);
         } else if ((*p)->deref_type == nir_deref_type_struct) {
            /* Struct indices are always constant. */
         } else {
            unreachable("Unsupported deref type");
         }
      }
   }

   nir_deref_path_finish(&path);
}

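/**
 * Mark "len" consecutive slots of "var", starting "offset" slots past its
 * base location, as used in the shader_info bitfields: inputs_read,
 * outputs_read or outputs_written (and their patch_* counterparts), plus the
 * indirect-access masks, the TCS cross-invocation masks and the FS
 * sample-qualifier flag.
 */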
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            nir_deref_instr *deref, bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      bool cross_invocation;
      bool indirect;
      get_deref_info(shader, var, deref, &cross_invocation, &indirect);

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic) {
            shader->info.patch_inputs_read |= bitfield;
            if (indirect)
               shader->info.patch_inputs_read_indirectly |= bitfield;
         } else {
            shader->info.inputs_read |= bitfield;
            if (indirect)
               shader->info.inputs_read_indirectly |= bitfield;
         }

         if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }

            if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
               shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }
         }

         if (var->data.fb_fetch_output)
            shader->info.outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used. Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   if (var->data.per_view) {
      /* TODO: Per view and Per Vertex are not currently used together. When
       * they start to be used (e.g. when adding Primitive Replication for GS
       * on Intel), verify that "peeling" the type twice is correct. This
       * assert ensures we remember it.
       */
      assert(!nir_is_per_vertex_io(var, shader->info.stage));
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, deref, is_output_read);
}

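/**
 * Compute the constant slot offset of "deref" from the base location of its
 * variable, counting array indices in units of attribute slots.  For
 * per-vertex variables the outermost (vertex) index is skipped.  Returns -1
 * (as an unsigned value) if any array index is not a compile-time constant.
 */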
static unsigned
get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
{
   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (per_vertex && nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
            break;

         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
                   nir_src_as_uint(d->arr.index);
      }
      /* TODO: we can get the offset for structs here see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used. Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;
   bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   if (per_vertex) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* Per view variables will be considered as a whole. */
   if (var->data.per_view)
      return false;

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of structs
    * here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {

      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false, per_vertex);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double element width for double types that take two slots */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array. This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
   return true;
}

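/**
 * Flag the shader as writing memory if the deref targets globally visible
 * storage (SSBO or global memory); scratch and shared writes are ignored.
 */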
static void
update_memory_written_for_deref(nir_shader *shader, nir_deref_instr *deref)
{
   switch (deref->mode) {
   case nir_var_mem_ssbo:
   case nir_var_mem_global:
      shader->info.writes_memory = true;
      break;
   default:
      /* Nothing to do. */
      break;
   }
}

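/**
 * Update shader_info for a single intrinsic: discard/demote usage, IO masks
 * for input/output derefs, double-slot vertex inputs, system values read,
 * helper-invocation requirements for quad ops, geometry-shader vertex and
 * primitive emission, and writes to globally visible memory.
 */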
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      shader->info.fs.uses_demote = true;
      /* fallthrough - quads with helper lanes only might be discarded entirely */
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      /* Freedreno uses the discard_if intrinsic to end GS invocations that
       * don't produce a vertex, so we only set uses_discard if executing on
       * a fragment shader. */
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (deref->mode == nir_var_shader_in ||
          deref->mode == nir_var_shader_out) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, deref, is_output_read);

         /* We need to track which inputs_read bits correspond to a
          * dvec3/dvec4 input attribute */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      if (instr->intrinsic == nir_intrinsic_store_deref)
         update_memory_written_for_deref(shader, deref);
      break;
   }

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_line_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      /* fall through */

   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_emit_vertex_with_counter:
      shader->info.gs.active_stream_mask |= 1 << nir_intrinsic_stream_id(instr);
      break;

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_inc_deref:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_add_deref:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_pre_dec_deref:
   case nir_intrinsic_atomic_counter_post_dec:
   case nir_intrinsic_atomic_counter_post_dec_deref:
   case nir_intrinsic_atomic_counter_min:
   case nir_intrinsic_atomic_counter_min_deref:
   case nir_intrinsic_atomic_counter_max:
   case nir_intrinsic_atomic_counter_max_deref:
   case nir_intrinsic_atomic_counter_and:
   case nir_intrinsic_atomic_counter_and_deref:
   case nir_intrinsic_atomic_counter_or:
   case nir_intrinsic_atomic_counter_or_deref:
   case nir_intrinsic_atomic_counter_xor:
   case nir_intrinsic_atomic_counter_xor_deref:
   case nir_intrinsic_atomic_counter_exchange:
   case nir_intrinsic_atomic_counter_exchange_deref:
   case nir_intrinsic_atomic_counter_comp_swap:
   case nir_intrinsic_atomic_counter_comp_swap_deref:
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_fadd:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_bindless_image_store_raw_intel:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_fadd:
   case nir_intrinsic_global_atomic_fcomp_swap:
   case nir_intrinsic_global_atomic_fmax:
   case nir_intrinsic_global_atomic_fmin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_store_raw_intel:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_store_raw_intel:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_add_ir3:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_and_ir3:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange_ir3:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imax_ir3:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imin_ir3:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_or_ir3:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umax_ir3:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umin_ir3:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor_ir3:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_global_ir3:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_ssbo_ir3:
      /* Only set this for globally visible memory, not scratch and not
       * shared.
       */
      shader->info.writes_memory = true;
      break;

   case nir_intrinsic_deref_atomic_add:
   case nir_intrinsic_deref_atomic_imin:
   case nir_intrinsic_deref_atomic_umin:
   case nir_intrinsic_deref_atomic_imax:
   case nir_intrinsic_deref_atomic_umax:
   case nir_intrinsic_deref_atomic_and:
   case nir_intrinsic_deref_atomic_or:
   case nir_intrinsic_deref_atomic_xor:
   case nir_intrinsic_deref_atomic_exchange:
   case nir_intrinsic_deref_atomic_comp_swap:
      update_memory_written_for_deref(shader, nir_src_as_deref(instr->src[0]));
      break;

   default:
      break;
   }
}

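/**
 * Update shader_info for a texture instruction: fragment shaders that use
 * implicit derivatives need helper invocations, and tg4 sets
 * uses_texture_gather.
 */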
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_helper_invocations = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   default:
      break;
   }
}

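/**
 * Update shader_info for an ALU instruction: derivative opcodes require
 * helper invocations in fragment shaders (plain fddx/fddy also set
 * uses_fddx_fddy), and any 64-bit destination or source sets uses_64bit.
 */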
static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   switch (instr->op) {
   case nir_op_fddx:
   case nir_op_fddy:
      shader->info.uses_fddx_fddy = true;
      /* Fall through */
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;
   default:
      break;
   }

   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
   }
}

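/**
 * Gather info from every instruction in a block.  Function calls are not
 * supported; callers must inline all functions first.
 */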
static void
gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         gather_alu_info(nir_instr_as_alu(instr), shader);
         break;
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}

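/**
 * Recompute shader_info from the shader IR: count textures and images
 * declared as uniforms (flagging buffer and MSAA images), then reset and
 * rebuild the input/output, system-value, FS and TCS bitfields by walking
 * every block reachable from the given entrypoint.
 */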
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.image_buffers = 0;
   shader->info.msaa_images = 0;

   nir_foreach_uniform_variable(var, shader) {
      /* Bindless textures and images don't use non-bindless slots.
       * Interface blocks imply inputs, outputs, UBO, or SSBO, which can only
       * mean bindless.
       */
      if (var->data.bindless || var->interface_type)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type);

      unsigned num_image_slots = glsl_type_get_image_count(var->type);
      if (num_image_slots) {
         const struct glsl_type *image_type = glsl_without_array(var->type);

         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_BUF) {
            shader->info.image_buffers |=
               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
         }
         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
            shader->info.msaa_images |=
               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
         }
         shader->info.num_images += num_image_slots;
      }
   }

   shader->info.inputs_read = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   shader->info.system_values_read = 0;
   shader->info.inputs_read_indirectly = 0;
   shader->info.outputs_accessed_indirectly = 0;
   shader->info.patch_inputs_read_indirectly = 0;
   shader->info.patch_outputs_accessed_indirectly = 0;

   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.uses_demote = false;
      shader->info.fs.needs_helper_invocations = false;
   }
   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
   }

   shader->info.writes_memory = shader->info.has_transform_feedback_varyings;

   void *dead_ctx = ralloc_context(NULL);
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader, dead_ctx);
   }
   ralloc_free(dead_ctx);
}