/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_deref.h"
#include "main/menums.h"
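
/* Classify a deref chain on a shader input/output.  Sets *cross_invocation
 * when a per-vertex access uses a vertex index other than the current
 * invocation ID, and *indirect when any remaining array index is not a
 * compile-time constant.
 */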
static void
get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
               bool *cross_invocation, bool *indirect)
{
   *cross_invocation = false;
   *indirect = false;

   const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* Vertex index is the outermost array index. */
   if (per_vertex) {
      assert((*p)->deref_type == nir_deref_type_array);
      nir_instr *vertex_index_instr = (*p)->arr.index.ssa->parent_instr;
      *cross_invocation =
         vertex_index_instr->type != nir_instr_type_intrinsic ||
         nir_instr_as_intrinsic(vertex_index_instr)->intrinsic !=
         nir_intrinsic_load_invocation_id;
      p++;
   }

   /* We always lower indirect dereferences for "compact" array vars. */
   if (!path.path[0]->var->data.compact) {
      /* Non-compact array vars: find out if they are indirect. */
      for (; *p; p++) {
         if ((*p)->deref_type == nir_deref_type_array) {
            *indirect |= !nir_src_is_const((*p)->arr.index);
         } else if ((*p)->deref_type == nir_deref_type_struct) {
            /* Struct indices are always constant. */
         } else {
            unreachable("Unsupported deref type");
         }
      }
   }

   nir_deref_path_finish(&path);
}
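
/* Mark a range of locations of a shader input or output variable as read or
 * written in shader->info, updating the patch/non-patch, direct/indirect and
 * cross-invocation bitfields as appropriate.
 */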
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            nir_deref_instr *deref, bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      bool cross_invocation;
      bool indirect;
      get_deref_info(shader, var, deref, &cross_invocation, &indirect);

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic) {
            shader->info.patch_inputs_read |= bitfield;
            if (indirect)
               shader->info.patch_inputs_read_indirectly |= bitfield;
         } else {
            shader->info.inputs_read |= bitfield;
            if (indirect)
               shader->info.inputs_read_indirectly |= bitfield;
         }

         if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
            shader->info.tess.tcs_cross_invocation_inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }

            if (cross_invocation && shader->info.stage == MESA_SHADER_TESS_CTRL)
               shader->info.tess.tcs_cross_invocation_outputs_read |= bitfield;
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
               if (indirect)
                  shader->info.patch_outputs_accessed_indirectly |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
               if (indirect)
                  shader->info.outputs_accessed_indirectly |= bitfield;
            }
         }

         if (var->data.fb_fetch_output)
            shader->info.outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   if (var->data.per_view) {
      /* TODO: Per view and Per Vertex are not currently used together.  When
       * they start to be used (e.g. when adding Primitive Replication for GS
       * on Intel), verify that "peeling" the type twice is correct.  This
       * assert ensures we remember it.
       */
      assert(!nir_is_per_vertex_io(var, shader->info.stage));
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, deref, is_output_read);
}
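
/* Compute the constant slot offset of a deref chain relative to the start of
 * the variable.  The outer vertex-index array of per-vertex I/O is skipped,
 * and -1 is returned when an array index is not a compile-time constant.
 */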
static unsigned
get_io_offset(nir_deref_instr *deref, bool is_vertex_input, bool per_vertex)
{
   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (per_vertex &&
             nir_deref_instr_parent(d)->deref_type == nir_deref_type_var)
            break;

         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
                   nir_src_as_uint(d->arr.index);
      }
      /* TODO: we can get the offset for structs here see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;
   bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);

   if (per_vertex) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* Per view variables will be considered as a whole. */
   if (var->data.per_view)
      return false;

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of structs
    * here marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {
      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false, per_vertex);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double element width for double types that takes two slots */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, deref, is_output_read);
   return true;
}
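
/* Record shader_info that can be derived from a single intrinsic: I/O masks
 * for load/store/interp derefs, system values read, discard/demote usage,
 * geometry-stream usage, and writes to globally visible memory.
 */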
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      shader->info.fs.uses_demote = true;
      /* fallthrough: quads with helper lanes only might be discarded entirely */
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      /* Freedreno uses the discard_if intrinsic to end GS invocations that
       * don't produce a vertex, so we only set uses_discard if executing on
       * a fragment shader. */
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (deref->mode == nir_var_shader_in ||
          deref->mode == nir_var_shader_out) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, deref, is_output_read);

         /* We need to track which input_reads bits correspond to a
          * dvec3/dvec4 input attribute */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      break;
   }

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      /* fall through */

   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_emit_vertex_with_counter:
      if (nir_intrinsic_stream_id(instr) > 0)
         shader->info.gs.uses_streams = true;

      break;

   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_dec_wrap:
   case nir_intrinsic_bindless_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_fadd:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_inc_wrap:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_store:
   case nir_intrinsic_bindless_image_store_raw_intel:
   case nir_intrinsic_global_atomic_add:
   case nir_intrinsic_global_atomic_and:
   case nir_intrinsic_global_atomic_comp_swap:
   case nir_intrinsic_global_atomic_exchange:
   case nir_intrinsic_global_atomic_fadd:
   case nir_intrinsic_global_atomic_fcomp_swap:
   case nir_intrinsic_global_atomic_fmax:
   case nir_intrinsic_global_atomic_fmin:
   case nir_intrinsic_global_atomic_imax:
   case nir_intrinsic_global_atomic_imin:
   case nir_intrinsic_global_atomic_or:
   case nir_intrinsic_global_atomic_umax:
   case nir_intrinsic_global_atomic_umin:
   case nir_intrinsic_global_atomic_xor:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_image_atomic_dec_wrap:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_inc_wrap:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_deref_atomic_dec_wrap:
   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_inc_wrap:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_store:
   case nir_intrinsic_image_deref_store_raw_intel:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_store_raw_intel:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_add_ir3:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_and_ir3:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_exchange_ir3:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_imax_ir3:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_imin_ir3:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_or_ir3:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_umax_ir3:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_umin_ir3:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_xor_ir3:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_global_ir3:
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_ssbo_ir3:
      /* Only set this for globally visible memory, not scratch and not
       * shared.
       */
      shader->info.writes_memory = true;
      break;

   default:
      break;
   }
}
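
/* Per-texture-instruction info: implicit-derivative sampling in a fragment
 * shader requires helper invocations, and tg4 marks texture-gather usage.
 */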
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_helper_invocations = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   default:
      break;
   }
}
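
/* Per-ALU-instruction info: derivative opcodes imply helper invocations in
 * fragment shaders, and any 64-bit destination or source sets uses_64bit.
 */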
static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   switch (instr->op) {
   case nir_op_fddx:
   case nir_op_fddy:
      shader->info.uses_fddx_fddy = true;
      /* Fall through */
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;
   default:
      break;
   }

   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
   }
}
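
/* Walk all instructions in a block and dispatch to the per-instruction-type
 * gather helpers above.
 */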
static void
gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         gather_alu_info(nir_instr_as_alu(instr), shader);
         break;
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}
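
/* Entry point: reset the gathered shader_info fields, count texture and image
 * uniform slots, then walk every block of the entrypoint to collect the
 * information.  All function calls must already be inlined.
 */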
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.last_msaa_image = -1;

   nir_foreach_variable(var, &shader->uniforms) {
      /* Bindless textures and images don't use non-bindless slots. */
      if (var->data.bindless)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type);
      shader->info.num_images += glsl_type_get_image_count(var->type);

      /* Assuming image slots don't have holes (e.g. OpenGL) */
      if (glsl_type_is_image(var->type) &&
          glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_MS)
         shader->info.last_msaa_image = shader->info.num_images - 1;
   }

   shader->info.inputs_read = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   shader->info.system_values_read = 0;
   shader->info.inputs_read_indirectly = 0;
   shader->info.outputs_accessed_indirectly = 0;
   shader->info.patch_inputs_read_indirectly = 0;
   shader->info.patch_outputs_accessed_indirectly = 0;

   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.uses_demote = false;
      shader->info.fs.needs_helper_invocations = false;
   }
   if (shader->info.stage == MESA_SHADER_TESS_CTRL) {
      shader->info.tess.tcs_cross_invocation_inputs_read = 0;
      shader->info.tess.tcs_cross_invocation_outputs_read = 0;
   }

   shader->info.writes_memory = shader->info.has_transform_feedback_varyings;

   void *dead_ctx = ralloc_context(NULL);
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader, dead_ctx);
   }
   ralloc_free(dead_ctx);
}