/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "main/menums.h"
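
/* Helper for the mark_whole_variable()/try_mask_partial_io() paths below:
 * sets `len` consecutive slot bits for `var`, starting `offset` slots past
 * its base location, in the appropriate inputs/outputs/patch masks of
 * shader->info.
 */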
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic)
            shader->info.patch_inputs_read |= bitfield;
         else
            shader->info.inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
            }
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
            }
         }

         if (var->data.fb_fetch_output)
            shader->info.outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used. Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, is_output_read);
}
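
/* Walk a deref chain and return how many attribute slots its constant array
 * indexing steps over, or -1 if any index in the chain is not a compile-time
 * constant.
 */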
static unsigned
get_io_offset(nir_deref_instr *deref, bool is_vertex_input)
{
   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
                   nir_src_as_uint(d->arr.index);
      }
      /* TODO: we can get the offset for structs here; see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used. Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of structs
    * here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {
      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* Double the element width for double types that take two slots. */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array. This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}
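
/* Gather per-intrinsic info: discard/demote usage, input/output reads and
 * writes, system values, quad ops (which require helper invocations in
 * fragment shaders), and geometry-shader primitive/stream usage.
 */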
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      assert(shader->info.stage == MESA_SHADER_FRAGMENT);
      shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (deref->mode == nir_var_shader_in ||
          deref->mode == nir_var_shader_out) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, is_output_read);

         /* We need to track which inputs_read bits correspond to a
          * dvec3/dvec4 input attribute. */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      break;
   }

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      /* fall through */

   case nir_intrinsic_emit_vertex:
      if (nir_intrinsic_stream_id(instr) > 0)
         shader->info.gs.uses_streams = true;
      break;

   default:
      break;
   }
}
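
/* Texture fetches with implicit derivatives require helper invocations in
 * fragment shaders; gather4 usage is also flagged here.
 */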
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_helper_invocations = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   default:
      break;
   }
}
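
/* Derivative opcodes require helper invocations in fragment shaders, and any
 * 64-bit destination or source marks the shader as using 64-bit ALU
 * operations.
 */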
static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   switch (instr->op) {
   case nir_op_fddx:
   case nir_op_fddy:
      shader->info.uses_fddx_fddy = true;
      /* fall through */

   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;

   default:
      break;
   }

   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
   }
}
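
/* Per-block dispatch on instruction type. Function calls must already have
 * been inlined, so nir_instr_type_call is a hard error here.
 */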
static void
gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         gather_alu_info(nir_instr_as_alu(instr), shader);
         break;
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}
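
/* Recompute shader_info for the given entrypoint. All gathered masks and
 * counters are reset first, so it is safe to call this again after lowering
 * passes change the shader's I/O. A typical call site looks like:
 *
 *    nir_shader_gather_info(shader, nir_shader_get_entrypoint(shader));
 */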
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.last_msaa_image = -1;
   nir_foreach_variable(var, &shader->uniforms) {
      /* Bindless textures and images don't use non-bindless slots. */
      if (var->data.bindless)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type);
      shader->info.num_images += glsl_type_get_image_count(var->type);

      /* Assuming image slots don't have holes (e.g. OpenGL) */
      if (glsl_type_is_image(var->type) &&
          glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_MS)
         shader->info.last_msaa_image = shader->info.num_images - 1;
   }
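
   /* Gathering below only ORs bits in, so clear any stale I/O and
    * system-value masks before walking the entrypoint. */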
   shader->info.inputs_read = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   shader->info.system_values_read = 0;
   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.needs_helper_invocations = false;
   }

   void *dead_ctx = ralloc_context(NULL);
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader, dead_ctx);
   }
   ralloc_free(dead_ctx);
}