nir/gather_info: handle emit_vertex_with_counter
src/compiler/nir/nir_gather_info.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "main/menums.h"

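/**
 * Mark the given slot range of a shader input or output variable as used,
 * setting the corresponding bits in the inputs_read/outputs_written-style
 * bitfields in shader->info.
 */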
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len,
            bool is_output_read)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

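      /* Generic per-patch varyings are tracked in their own compact
       * bitfields, indexed relative to VARYING_SLOT_PATCH0.
       */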
      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic)
            shader->info.patch_inputs_read |= bitfield;
         else
            shader->info.inputs_read |= bitfield;

         if (shader->info.stage == MESA_SHADER_FRAGMENT) {
            shader->info.fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_output_read) {
            if (is_patch_generic) {
               shader->info.patch_outputs_read |= bitfield;
            } else {
               shader->info.outputs_read |= bitfield;
            }
         } else {
            if (is_patch_generic) {
               shader->info.patch_outputs_written |= bitfield;
            } else if (!var->data.read_only) {
               shader->info.outputs_written |= bitfield;
            }
         }

         if (var->data.fb_fetch_output)
            shader->info.outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used. Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   const unsigned slots =
      var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
                        : glsl_count_attribute_slots(type, false);

   set_io_mask(shader, var, 0, slots, is_output_read);
}

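/**
 * Compute the constant slot offset a deref chain applies to its base
 * variable. Returns -1 (as an unsigned) if any array index in the chain is
 * not a compile-time constant; callers check for that sentinel.
 */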
static unsigned
get_io_offset(nir_deref_instr *deref, bool is_vertex_input)
{
   unsigned offset = 0;

   for (nir_deref_instr *d = deref; d; d = nir_deref_instr_parent(d)) {
      if (d->deref_type == nir_deref_type_array) {
         if (!nir_src_is_const(d->arr.index))
            return -1;

         offset += glsl_count_attribute_slots(d->type, is_vertex_input) *
                   nir_src_as_uint(d->arr.index);
      }
      /* TODO: we can get the offset for structs here; see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used. Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_variable *var,
                    nir_deref_instr *deref, bool is_output_read)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now we just give up if we see varying structs or arrays of structs
    * here, and let the caller mark the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) && !var->data.compact &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {

      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   unsigned offset = get_io_offset(deref, false);
   if (offset == -1)
      return false;

   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* Double the element width for double types that take two slots. */
   if (glsl_type_is_dual_slot(glsl_without_array(type)))
      elem_width *= 2;

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array. This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width, is_output_read);
   return true;
}

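/**
 * Record the effects of a single intrinsic in shader->info: I/O variable
 * usage, system-value reads, and stage-specific flags such as uses_discard
 * and uses_streams.
 */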
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader,
                      void *dead_ctx)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_demote:
   case nir_intrinsic_demote_if:
      shader->info.fs.uses_demote = true;
      /* fallthrough: quads with only helper lanes might be discarded entirely */
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      assert(shader->info.stage == MESA_SHADER_FRAGMENT);
      shader->info.fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_deref_at_centroid:
   case nir_intrinsic_interp_deref_at_sample:
   case nir_intrinsic_interp_deref_at_offset:
   case nir_intrinsic_interp_deref_at_vertex:
   case nir_intrinsic_load_deref:
   case nir_intrinsic_store_deref: {
      nir_deref_instr *deref = nir_src_as_deref(instr->src[0]);
      if (deref->mode == nir_var_shader_in ||
          deref->mode == nir_var_shader_out) {
         nir_variable *var = nir_deref_instr_get_variable(deref);
         bool is_output_read = false;
         if (var->data.mode == nir_var_shader_out &&
             instr->intrinsic == nir_intrinsic_load_deref)
            is_output_read = true;

         if (!try_mask_partial_io(shader, var, deref, is_output_read))
            mark_whole_variable(shader, var, is_output_read);

         /* We need to track which inputs_read bits correspond to a
          * dvec3/dvec4 input attribute.
          */
         if (shader->info.stage == MESA_SHADER_VERTEX &&
             var->data.mode == nir_var_shader_in &&
             glsl_type_is_dual_slot(glsl_without_array(var->type))) {
            for (unsigned i = 0; i < glsl_count_attribute_slots(var->type, false); i++) {
               int idx = var->data.location + i;
               shader->info.vs.double_inputs |= BITFIELD64_BIT(idx);
            }
         }
      }
      break;
   }

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_frag_coord:
   case nir_intrinsic_load_point_coord:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_first_vertex:
   case nir_intrinsic_load_is_indexed_draw:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_patch_vertices_in:
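      /* Translate the intrinsic to its SYSTEM_VALUE_* enum and set the
       * corresponding bit.
       */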
      shader->info.system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;
   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->info.stage == MESA_SHADER_GEOMETRY);
      shader->info.gs.uses_end_primitive = 1;
      /* fall through */

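   /* Both the plain intrinsics and the _with_counter variants produced by
    * nir_lower_gs_intrinsics carry a STREAM_ID index, so gather from both.
    */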
   case nir_intrinsic_emit_vertex:
   case nir_intrinsic_emit_vertex_with_counter:
      if (nir_intrinsic_stream_id(instr) > 0)
         shader->info.gs.uses_streams = true;

      break;

   default:
      break;
   }
}

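/**
 * Gather per-shader info from a texture instruction: ops with implicit
 * derivatives need helper invocations in fragment shaders, and tg4 sets
 * uses_texture_gather.
 */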
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       nir_tex_instr_has_implicit_derivative(instr))
      shader->info.fs.needs_helper_invocations = true;

   switch (instr->op) {
   case nir_texop_tg4:
      shader->info.uses_texture_gather = true;
      break;
   default:
      break;
   }
}

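/**
 * Gather per-shader info from an ALU instruction: derivative opcodes and
 * any use of 64-bit sources or destinations.
 */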
static void
gather_alu_info(nir_alu_instr *instr, nir_shader *shader)
{
   switch (instr->op) {
   case nir_op_fddx:
   case nir_op_fddy:
      shader->info.uses_fddx_fddy = true;
      /* Fall through */
   case nir_op_fddx_fine:
   case nir_op_fddy_fine:
   case nir_op_fddx_coarse:
   case nir_op_fddy_coarse:
      if (shader->info.stage == MESA_SHADER_FRAGMENT)
         shader->info.fs.needs_helper_invocations = true;
      break;
   default:
      break;
   }

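   /* Note 64-bit usage if the destination or any source is 64 bits wide. */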
   shader->info.uses_64bit |= instr->dest.dest.ssa.bit_size == 64;
   unsigned num_srcs = nir_op_infos[instr->op].num_inputs;
   for (unsigned i = 0; i < num_srcs; i++) {
      shader->info.uses_64bit |= nir_src_bit_size(instr->src[i].src) == 64;
   }
}

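/**
 * Dispatch every instruction in a block to the appropriate gather helper.
 * Calls must already have been inlined by this point.
 */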
static void
gather_info_block(nir_block *block, nir_shader *shader, void *dead_ctx)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_alu:
         gather_alu_info(nir_instr_as_alu(instr), shader);
         break;
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader, dead_ctx);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}

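/**
 * (Re)populate shader->info from the IR: texture/image counts from the
 * uniform variables, then the I/O masks, system values, and stage-specific
 * flags gathered from every instruction reachable from the entrypoint.
 *
 * Typical usage (sketch), once all functions have been inlined:
 *
 *    nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 */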
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info.num_textures = 0;
   shader->info.num_images = 0;
   shader->info.last_msaa_image = -1;
   nir_foreach_variable(var, &shader->uniforms) {
      /* Bindless textures and images don't use non-bindless slots. */
      if (var->data.bindless)
         continue;

      shader->info.num_textures += glsl_type_get_sampler_count(var->type);
      shader->info.num_images += glsl_type_get_image_count(var->type);

      /* Assuming image slots don't have holes (e.g. OpenGL) */
      if (glsl_type_is_image(var->type) &&
          glsl_get_sampler_dim(var->type) == GLSL_SAMPLER_DIM_MS)
         shader->info.last_msaa_image = shader->info.num_images - 1;
   }

   shader->info.inputs_read = 0;
   shader->info.outputs_written = 0;
   shader->info.outputs_read = 0;
   shader->info.patch_outputs_read = 0;
   shader->info.patch_inputs_read = 0;
   shader->info.patch_outputs_written = 0;
   shader->info.system_values_read = 0;
   if (shader->info.stage == MESA_SHADER_VERTEX) {
      shader->info.vs.double_inputs = 0;
   }
   if (shader->info.stage == MESA_SHADER_FRAGMENT) {
      shader->info.fs.uses_sample_qualifier = false;
      shader->info.fs.uses_discard = false;
      shader->info.fs.uses_demote = false;
      shader->info.fs.needs_helper_invocations = false;
   }

   void *dead_ctx = ralloc_context(NULL);
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader, dead_ctx);
   }
   ralloc_free(dead_ctx);
}