/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "main/menums.h"
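
/* Determine, for a shader I/O deref, whether the access may cross TCS
 * invocations (vertex index other than load_invocation_id) and whether any
 * array index along the chain is non-constant (indirect).
 */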
static void
get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
               bool *cross_invocation, bool *indirect)
{
   *cross_invocation = false;
   *indirect = false;

   const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* Vertex index is the outermost array index. */
   if (per_vertex) {
      assert((*p)->deref_type == nir_deref_type_array);
      nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
      *cross_invocation =
         vertex_index_instr->type != nir_instr_type_intrinsic ||
         nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
         nir_intrinsic_load_invocation_id;
      p++;
   }

   /* We always lower indirect dereferences for "compact" array vars. */
   if (!path.path[0]->var->data.compact) {
      /* Non-compact array vars: find out if they are indirect. */
      for (; *p; p++) {
         if ((*p)->deref_type == nir_deref_type_array) {
            *indirect |= !nir_src_is_const((*p)->arr.index);
         } else if ((*p)->deref_type == nir_deref_type_struct) {
            /* Struct indices are always constant. */
         } else {
            unreachable("Unsupported deref type");
         }
      }
   }

   nir_deref_path_finish(&path);
}
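
/* Mark [offset, offset + len) slots of the given variable as used, updating
 * the inputs_read/outputs_written style bitfields along with their indirect
 * and TCS cross-invocation counterparts.
 */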
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            nir_deref_instr *deref, bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      bool cross_invocation;
      bool indirect;
      get_deref_info(shader, var, deref, &cross_invocation, &indirect);

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic) {
            shader->info.patch_inputs_read |= bitfield;
            if (indirect)
               shader->info.patch_inputs_read_indirectly |= bitfield;
         } else {
            shader->info.inputs_read |= bitfield;
            if (indirect)
               shader->info.inputs_read_indirectly |= bitfield;
         }

         if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }

            if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
               shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }
         }

         /* Framebuffer-fetch outputs are implicitly read as well. */
         if (var->data.fb_fetch_output)
            shader->info.outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   if (var->data.per_view) {
      /* TODO: Per view and Per Vertex are not currently used together.  When
       * they start to be used (e.g. when adding Primitive Replication for GS
       * on Intel), verify that "peeling" the type twice is correct.  This
       * assert ensures we remember it.
       */
      assert(!nir_is_per_vertex_io(var, shader->info.stage));
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, deref, is_output_read);
}
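
/* Accumulate the constant slot offset of a deref chain, skipping the
 * outermost vertex index for per-vertex I/O.  Returns -1 (as unsigned) if
 * any array index is non-constant.
 */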
static unsigned
get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
{
   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (per_vertex &&
             nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
            break;

         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
                   nir_src_as_uint(d->arr.index);
      }
      /* TODO: we can get the offset for structs here see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;
   bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   if (per_vertex) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* Per view variables will be considered as a whole. */
   if (var->data.per_view)
      return false;

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of structs
    * here marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {
      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false, per_vertex);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double element width for double types that takes two slots */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
   return true;
}
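
/* Only SSBO and global derefs can write memory that is observable outside
 * the shader invocation.
 */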
static void
update_memory_written_for_deref(nir_shader *shader, nir_deref_instr *deref)
{
   switch (deref->mode) {
   case nir_var_mem_ssbo:
   case nir_var_mem_global:
      shader->info.writes_memory = true;
      break;
   default:
      break;
   }
}
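
/* Gather per-intrinsic information: I/O slot masks, system values read,
 * and side effects such as discard/demote and writes to visible memory.
 */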
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   uint64_t slot_mask = 0;

   if (nir_intrinsic_infos[instr->intrinsic].index_map[NIR_INTRINSIC_IO_SEMANTICS] > 0) {
      nir_io_semantics semantics = nir_intrinsic_io_semantics(instr);

      slot_mask = BITFIELD64_RANGE(semantics.location, semantics.num_slots);
   }

   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      shader->info.fs.uses_demote = true;
      /* fallthrough - quads with helper lanes only might be discarded entirely */
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      /* Freedreno uses the discard_if intrinsic to end GS invocations that
       * don't produce a vertex, so we only set uses_discard if executing on
       * a fragment shader. */
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (deref->mode == nir_var_shader_in ||
          deref->mode == nir_var_shader_out) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, deref, is_output_read);

         /* We need to track which input_reads bits correspond to a
          * dvec3/dvec4 input attribute */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      if (instr->intrinsic == nir_intrinsic_store_deref)
         update_memory_written_for_deref(shader, deref);
      break;
   }

   case nir_intrinsic_load_input:
      if (shader->info.stage == MESA_SHADER_TESS_EVAL)
         shader->info.patch_inputs_read |= slot_mask;
      else
         shader->info.inputs_read |= slot_mask;
      break;

   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_interpolated_input:
      shader->info.inputs_read |= slot_mask;
      break;

   case nir_intrinsic_load_output:
      if (shader->info.stage == MESA_SHADER_TESS_CTRL)
         shader->info.patch_outputs_read |= slot_mask;
      else
         shader->info.outputs_read |= slot_mask;
      break;

   case nir_intrinsic_load_per_vertex_output:
      shader->info.outputs_read |= slot_mask;
      break;

   case nir_intrinsic_store_output:
      if (shader->info.stage == MESA_SHADER_TESS_CTRL)
         shader->info.patch_outputs_written |= slot_mask;
      else
         shader->info.outputs_written |= slot_mask;
      break;

   case nir_intrinsic_store_per_vertex_output:
      shader->info.outputs_written |= slot_mask;
      break;

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_line_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      /* fall through */

   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_emit_vertex_with_counter:
      shader->info.gs.active_stream_mask |= 1 << nir_intrinsic_stream_id(instr);
      break;

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_inc_deref:
   case nir_intrinsic_atomic_counter_add:
   case nir_intrinsic_atomic_counter_add_deref:
   case nir_intrinsic_atomic_counter_pre_dec:
   case nir_intrinsic_atomic_counter_pre_dec_deref:
   case nir_intrinsic_atomic_counter_post_dec:
   case nir_intrinsic_atomic_counter_post_dec_deref:
   case nir_intrinsic_atomic_counter_min:
   case nir_intrinsic_atomic_counter_min_deref:
   case nir_intrinsic_atomic_counter_max:
   case nir_intrinsic_atomic_counter_max_deref:
   case nir_intrinsic_atomic_counter_and:
   case nir_intrinsic_atomic_counter_and_deref:
   case nir_intrinsic_atomic_counter_or:
   case nir_intrinsic_atomic_counter_or_deref:
   case nir_intrinsic_atomic_counter_xor:
   case nir_intrinsic_atomic_counter_xor_deref:
   case nir_intrinsic_atomic_counter_exchange:
   case nir_intrinsic_atomic_counter_exchange_deref:
   case nir_intrinsic_atomic_counter_comp_swap:
   case nir_intrinsic_atomic_counter_comp_swap_deref:
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_fadd:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_bindless_image_store_raw_intel:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_fadd:
   case nir_intrinsic_global_atomic_fcomp_swap:
   case nir_intrinsic_global_atomic_fmax:
   case nir_intrinsic_global_atomic_fmin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_store_raw_intel:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_store_raw_intel:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_add_ir3:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_and_ir3:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange_ir3:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imax_ir3:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imin_ir3:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_or_ir3:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umax_ir3:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umin_ir3:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor_ir3:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_global_ir3:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_ssbo_ir3:
      /* Only set this for globally visible memory, not scratch and not
       * shared.
       */
      shader->info.writes_memory = true;
      break;

   case nir_intrinsic_deref_atomic_add:
   case nir_intrinsic_deref_atomic_imin:
   case nir_intrinsic_deref_atomic_umin:
   case nir_intrinsic_deref_atomic_imax:
   case nir_intrinsic_deref_atomic_umax:
   case nir_intrinsic_deref_atomic_and:
   case nir_intrinsic_deref_atomic_or:
   case nir_intrinsic_deref_atomic_xor:
   case nir_intrinsic_deref_atomic_exchange:
   case nir_intrinsic_deref_atomic_comp_swap:
      update_memory_written_for_deref(shader, nir_src_as_deref(instr->src[0]));
      break;

   default:
      break;
   }
}
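
/* Texturing with implicit derivatives requires helper invocations in
 * fragment shaders; also record the use of texture gather.
 */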
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_helper_invocations = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   default:
      break;
   }
}
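
/* Derivative ALU ops also require helper invocations in fragment shaders;
 * any 64-bit destination or source marks the shader as using 64-bit ops.
 */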
static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   switch (instr->op) {
   case nir_op_fddx:
   case nir_op_fddy:
      shader->info.uses_fddx_fddy = true;
      /* fall through */
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;
   default:
      break;
   }

   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
   }
}
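
/* Dispatch every instruction in a block to the matching gather helper.
 * Function calls must already have been inlined.
 */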
static void
gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         gather_alu_info(nir_instr_as_alu(instr), shader);
         break;
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}
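
/* Recompute shader_info for the given entrypoint: texture/image counts from
 * uniform variables, then the I/O, system-value, and side-effect masks from
 * a walk over every instruction.
 */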
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.image_buffers = 0;
   shader->info.msaa_images = 0;

   nir_foreach_uniform_variable(var, shader) {
      /* Bindless textures and images don't use non-bindless slots.
       * Interface blocks imply inputs, outputs, UBO, or SSBO, which can only
       * mean bindless.
       */
      if (var->data.bindless || var->interface_type)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type);

      unsigned num_image_slots = glsl_type_get_image_count(var->type);
      if (num_image_slots) {
         const struct glsl_type *image_type = glsl_without_array(var->type);

         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_BUF) {
            shader->info.image_buffers |=
               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
         }
         if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
            shader->info.msaa_images |=
               BITFIELD_RANGE(shader->info.num_images, num_image_slots);
         }
         shader->info.num_images += num_image_slots;
      }
   }

   shader->info.inputs_read = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   shader->info.system_values_read = 0;
   shader->info.inputs_read_indirectly = 0;
   shader->info.outputs_accessed_indirectly = 0;
   shader->info.patch_inputs_read_indirectly = 0;
   shader->info.patch_outputs_accessed_indirectly = 0;

   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.uses_demote = false;
      shader->info.fs.needs_helper_invocations = false;
   }
   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
   }

   shader->info.writes_memory = shader->info.has_transform_feedback_varyings;

   void *dead_ctx = ralloc_context(NULL);
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader, dead_ctx);
   }
   ralloc_free(dead_ctx);
}
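
/*
 * Usage sketch (illustrative only; the calls below are from the wider NIR
 * API, not this file): run the pass once all functions are inlined, then
 * read the results from shader->info, e.g.
 *
 *    nir_shader_gather_info(shader, nir_shader_get_entrypoint(shader));
 *    if (shader->info.writes_memory)
 *       disable_early_z(...);
 *
 * where disable_early_z() stands in for whatever driver-specific response
 * is appropriate.
 */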