nir: add support for counting AoA uniforms in nir_shader_gather_info()
[mesa.git] / src/compiler/nir/nir_gather_info.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "nir.h"

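/* Mark the `len` slots starting at var->data.location + offset as used in
 * the appropriate input/output bitmasks of shader->info, handling per-patch
 * generic varyings, dual-slot vertex inputs, sample-qualified FS inputs,
 * and FB-fetch outputs.
 */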
static void
set_io_mask(nir_shader *shader, nir_variable *var, int offset, int len)
{
   for (int i = 0; i < len; i++) {
      assert(var->data.location != -1);

      int idx = var->data.location + offset + i;
      bool is_patch_generic = var->data.patch &&
                              idx != VARYING_SLOT_TESS_LEVEL_INNER &&
                              idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
                              idx != VARYING_SLOT_BOUNDING_BOX0 &&
                              idx != VARYING_SLOT_BOUNDING_BOX1;
      uint64_t bitfield;

      if (is_patch_generic) {
         assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
         bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
      } else {
         assert(idx < VARYING_SLOT_MAX);
         bitfield = BITFIELD64_BIT(idx);
      }

      if (var->data.mode == nir_var_shader_in) {
         if (is_patch_generic)
            shader->info->patch_inputs_read |= bitfield;
         else
            shader->info->inputs_read |= bitfield;

         /* double inputs read is only for vertex inputs */
         if (shader->stage == MESA_SHADER_VERTEX &&
             glsl_type_is_dual_slot(glsl_without_array(var->type)))
            shader->info->double_inputs_read |= bitfield;

         if (shader->stage == MESA_SHADER_FRAGMENT) {
            shader->info->fs.uses_sample_qualifier |= var->data.sample;
         }
      } else {
         assert(var->data.mode == nir_var_shader_out);
         if (is_patch_generic) {
            shader->info->patch_outputs_written |= bitfield;
         } else if (!var->data.read_only) {
            shader->info->outputs_written |= bitfield;
         }

         if (var->data.fb_fetch_output)
            shader->info->outputs_read |= bitfield;
      }
   }
}

/**
 * Mark an entire variable as used.  Caller must ensure that the variable
 * represents a shader input or output.
 */
static void
mark_whole_variable(nir_shader *shader, nir_variable *var)
{
   const struct glsl_type *type = var->type;
   bool is_vertex_input = false;

   if (nir_is_per_vertex_io(var, shader->stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   if (shader->stage == MESA_SHADER_VERTEX &&
       var->data.mode == nir_var_shader_in)
      is_vertex_input = true;

   set_io_mask(shader, var, 0,
               glsl_count_attribute_slots(type, is_vertex_input));
}

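/* Compute the constant slot offset of a deref chain relative to the
 * dereferenced variable, measured in attribute slots.  Returns (unsigned)-1
 * if the chain contains an indirect array index.
 */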
static unsigned
get_io_offset(nir_deref_var *deref, bool is_vertex_input)
{
   unsigned offset = 0;

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            return -1;
         }

         offset += glsl_count_attribute_slots(tail->type, is_vertex_input) *
                   deref_array->base_offset;
      }
      /* TODO: we can get the offset for structs here; see nir_lower_io() */
   }

   return offset;
}

/**
 * Try to mark a portion of the given varying as used.  Caller must ensure
 * that the variable represents a shader input or output.
 *
 * If the index can't be interpreted as a constant, or some other problem
 * occurs, then nothing will be marked and false will be returned.
 */
static bool
try_mask_partial_io(nir_shader *shader, nir_deref_var *deref)
{
   nir_variable *var = deref->var;
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, shader->stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   /* The code below only handles:
    *
    * - Indexing into matrices
    * - Indexing into arrays of (arrays, matrices, vectors, or scalars)
    *
    * For now, we just give up if we see varying structs and arrays of
    * structs here, marking the entire variable as used.
    */
   if (!(glsl_type_is_matrix(type) ||
         (glsl_type_is_array(type) &&
          (glsl_type_is_numeric(glsl_without_array(type)) ||
           glsl_type_is_boolean(glsl_without_array(type)))))) {

      /* If we don't know how to handle this case, give up and let the
       * caller mark the whole variable as used.
       */
      return false;
   }

   bool is_vertex_input = false;
   if (shader->stage == MESA_SHADER_VERTEX &&
       var->data.mode == nir_var_shader_in)
      is_vertex_input = true;

   unsigned offset = get_io_offset(deref, is_vertex_input);
   if (offset == -1)
      return false;

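   /* An array (including an array of arrays) is treated as a flat sequence
    * of its leaf elements: e.g. a "vec4 v[3][2]" varying has an AoA size of
    * 6, so num_elems below will be 6.
    */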
   unsigned num_elems;
   unsigned elem_width = 1;
   unsigned mat_cols = 1;
   if (glsl_type_is_array(type)) {
      num_elems = glsl_get_aoa_size(type);
      if (glsl_type_is_matrix(glsl_without_array(type)))
         mat_cols = glsl_get_matrix_columns(glsl_without_array(type));
   } else {
      num_elems = glsl_get_matrix_columns(type);
   }

   /* double the element width for double types that take two slots */
   if (!is_vertex_input &&
       glsl_type_is_dual_slot(glsl_without_array(type))) {
      elem_width *= 2;
   }

   if (offset >= num_elems * elem_width * mat_cols) {
      /* Constant index outside the bounds of the matrix/array.  This could
       * arise as a result of constant folding of a legal GLSL program.
       *
       * Even though the spec says that indexing outside the bounds of a
       * matrix/array results in undefined behaviour, we don't want to pass
       * out-of-range values to set_io_mask() (since this could result in
       * slots that don't exist being marked as used), so just let the caller
       * mark the whole variable as used.
       */
      return false;
   }

   set_io_mask(shader, var, offset, elem_width);
   return true;
}

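/* Gather per-intrinsic info: discard usage, input/output variable accesses,
 * system value reads, and geometry-shader EndPrimitive() usage.
 */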
static void
gather_intrinsic_info(nir_intrinsic_instr *instr, nir_shader *shader)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      assert(shader->stage == MESA_SHADER_FRAGMENT);
      shader->info->fs.uses_discard = true;
      break;

   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset:
   case nir_intrinsic_load_var:
   case nir_intrinsic_store_var:
      if (instr->variables[0]->var->data.mode == nir_var_shader_in ||
          instr->variables[0]->var->data.mode == nir_var_shader_out) {
         if (!try_mask_partial_io(shader, instr->variables[0]))
            mark_whole_variable(shader, instr->variables[0]->var);
      }
      break;

   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_front_face:
   case nir_intrinsic_load_vertex_id:
   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_id:
   case nir_intrinsic_load_sample_pos:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_primitive_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_local_invocation_index:
   case nir_intrinsic_load_work_group_id:
   case nir_intrinsic_load_num_work_groups:
   case nir_intrinsic_load_tess_coord:
   case nir_intrinsic_load_tess_level_outer:
   case nir_intrinsic_load_tess_level_inner:
      shader->info->system_values_read |=
         (1ull << nir_system_value_from_intrinsic(instr->intrinsic));
      break;

   case nir_intrinsic_end_primitive:
   case nir_intrinsic_end_primitive_with_counter:
      assert(shader->stage == MESA_SHADER_GEOMETRY);
      shader->info->gs.uses_end_primitive = 1;
      break;

   default:
      break;
   }
}

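/* Gather per-texture-instruction info; currently just whether
 * textureGather() (tg4) is used.
 */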
static void
gather_tex_info(nir_tex_instr *instr, nir_shader *shader)
{
   if (instr->op == nir_texop_tg4)
      shader->info->uses_texture_gather = true;
}

static void
gather_info_block(nir_block *block, nir_shader *shader)
{
   nir_foreach_instr(instr, block) {
      switch (instr->type) {
      case nir_instr_type_intrinsic:
         gather_intrinsic_info(nir_instr_as_intrinsic(instr), shader);
         break;
      case nir_instr_type_tex:
         gather_tex_info(nir_instr_as_tex(instr), shader);
         break;
      case nir_instr_type_call:
         assert(!"nir_shader_gather_info only works if functions are inlined");
         break;
      default:
         break;
      }
   }
}

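/**
 * (Re)populate shader->info for the given shader: uniform sampler/image
 * counts, input/output usage masks, and system values read.  Only valid
 * once all function calls have been inlined (see the assert in
 * gather_info_block()).  Sampler and image uniforms declared as arrays of
 * arrays are counted once per leaf element, e.g. "uniform sampler2D s[3][2]"
 * adds 6 to num_textures.
 */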
void
nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint)
{
   shader->info->num_textures = 0;
   shader->info->num_images = 0;
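   /* Count sampler and image uniforms.  glsl_get_aoa_size() returns the
    * total number of leaf elements in an array of arrays, so an AoA uniform
    * contributes one texture/image per leaf element.
    */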
   nir_foreach_variable(var, &shader->uniforms) {
      const struct glsl_type *type = var->type;
      unsigned count = 1;
      if (glsl_type_is_array(type)) {
         count = glsl_get_aoa_size(type);
         type = glsl_without_array(type);
      }

      if (glsl_type_is_image(type)) {
         shader->info->num_images += count;
      } else if (glsl_type_is_sampler(type)) {
         shader->info->num_textures += count;
      }
   }

   shader->info->inputs_read = 0;
   shader->info->outputs_written = 0;
   shader->info->outputs_read = 0;
   shader->info->double_inputs_read = 0;
   shader->info->patch_inputs_read = 0;
   shader->info->patch_outputs_written = 0;
   shader->info->system_values_read = 0;
   if (shader->stage == MESA_SHADER_FRAGMENT) {
      shader->info->fs.uses_sample_qualifier = false;
   }
   nir_foreach_block(block, entrypoint) {
      gather_info_block(block, shader);
   }
}