/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "nir/nir_xfb_info.h"

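/* Record that the shader accesses a resource in this variable's descriptor
 * set, so only the descriptor sets that are actually used get bound.
 */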
static void mark_sampler_desc(const nir_variable *var,
			      struct radv_shader_info *info)
{
	info->desc_set_used_mask |= (1 << var->data.descriptor_set);
}

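/* Mark num_slots consecutive output slots starting at 'param' as written.
 * E.g. param=2, num_slots=3 sets bits 2..4: ((1ull << 3) - 1) << 2 = 0x1c.
 */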
static void mark_ls_output(struct radv_shader_info *info,
			   uint32_t param, int num_slots)
{
	uint64_t mask = (1ull << num_slots) - 1ull;
	info->vs.ls_outputs_written |= (mask << param);
}

static void mark_tess_output(struct radv_shader_info *info,
			     bool is_patch, uint32_t param, int num_slots)
{
	uint64_t mask = (1ull << num_slots) - 1ull;
	if (is_patch)
		info->tcs.patch_outputs_written |= (mask << param);
	else
		info->tcs.outputs_written |= (mask << param);
}

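/* Compute the constant part of a deref chain's offset (in attribute slots)
 * relative to the start of the variable: struct members contribute the slot
 * counts of the fields before them, constant array indices contribute
 * index * element_slots, and non-constant indices are skipped since only
 * the constant portion is wanted here.
 */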
static void
get_deref_offset(nir_deref_instr *instr,
		 unsigned *const_out)
{
	nir_variable *var = nir_deref_instr_get_variable(instr);
	nir_deref_path path;
	unsigned idx_lvl = 1;

	if (var->data.compact) {
		assert(instr->deref_type == nir_deref_type_array);
		*const_out = nir_src_as_uint(instr->arr.index);
		return;
	}

	nir_deref_path_init(&path, instr, NULL);

	uint32_t const_offset = 0;

	for (; path.path[idx_lvl]; ++idx_lvl) {
		const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
		if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
			unsigned index = path.path[idx_lvl]->strct.index;

			for (unsigned i = 0; i < index; i++) {
				const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
				const_offset += glsl_count_attribute_slots(ft, false);
			}
		} else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
			unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, false);
			if (nir_src_is_const(path.path[idx_lvl]->arr.index))
				const_offset += nir_src_as_uint(path.path[idx_lvl]->arr.index) * size;
		} else
			unreachable("Unhandled deref type in get_deref_offset");
	}

	*const_out = const_offset;

	nir_deref_path_finish(&path);
}

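/* For vertex shaders, record which components of each input attribute are
 * actually read, so unused components can be skipped when fetching.
 */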
static void
gather_intrinsic_load_deref_info(const nir_shader *nir,
				 const nir_intrinsic_instr *instr,
				 struct radv_shader_info *info)
{
	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: {
		nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

		if (var && var->data.mode == nir_var_shader_in) {
			unsigned idx = var->data.location;
			uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);

			info->vs.input_usage_mask[idx] |=
				mask << var->data.location_frac;
		}
		break;
	}
	default:
		break;
	}
}

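/* Widen a 32-bit writemask to its 64-bit equivalent: each 64-bit component
 * occupies two 32-bit slots, so every set bit expands to a pair of bits.
 * E.g. 0b0101 (components x and z) becomes 0b00110011.
 */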
static uint32_t
widen_writemask(uint32_t wrmask)
{
	uint32_t new_wrmask = 0;
	for (unsigned i = 0; i < 4; i++)
		new_wrmask |= (wrmask & (1 << i) ? 0x3 : 0x0) << (i * 2);
	return new_wrmask;
}

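/* Accumulate the per-component usage mask of an output variable from a
 * store_deref. Compact variables (clip/cull distances) are addressed as a
 * flat scalar array, so the constant offset selects both the slot and the
 * component; for everything else the writemask is applied per slot, widened
 * first for 64-bit types.
 */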
static void
set_output_usage_mask(const nir_shader *nir, const nir_intrinsic_instr *instr,
		      uint8_t *output_usage_mask)
{
	nir_deref_instr *deref_instr =
		nir_instr_as_deref(instr->src[0].ssa->parent_instr);
	nir_variable *var = nir_deref_instr_get_variable(deref_instr);
	unsigned attrib_count = glsl_count_attribute_slots(deref_instr->type, false);
	unsigned idx = var->data.location;
	unsigned comp = var->data.location_frac;
	unsigned const_offset = 0;

	get_deref_offset(deref_instr, &const_offset);

	if (var->data.compact) {
		assert(!glsl_type_is_64bit(deref_instr->type));
		const_offset += comp;
		output_usage_mask[idx + const_offset / 4] |= 1 << (const_offset % 4);
		return;
	}

	uint32_t wrmask = nir_intrinsic_write_mask(instr);
	if (glsl_type_is_64bit(deref_instr->type))
		wrmask = widen_writemask(wrmask);

	for (unsigned i = 0; i < attrib_count; i++)
		output_usage_mask[idx + i + const_offset] |=
			((wrmask >> (i * 4)) & 0xf) << comp;
}

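/* Track which outputs each stage writes: VS/TES/GS get per-component usage
 * masks, while TCS outputs are tracked as per-slot written bitfields, split
 * into per-vertex and per-patch outputs.
 */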
static void
gather_intrinsic_store_deref_info(const nir_shader *nir,
				  const nir_intrinsic_instr *instr,
				  struct radv_shader_info *info)
{
	nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

	if (var && var->data.mode == nir_var_shader_out) {
		unsigned idx = var->data.location;

		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			set_output_usage_mask(nir, instr,
					      info->vs.output_usage_mask);
			break;
		case MESA_SHADER_GEOMETRY:
			set_output_usage_mask(nir, instr,
					      info->gs.output_usage_mask);
			break;
		case MESA_SHADER_TESS_EVAL:
			set_output_usage_mask(nir, instr,
					      info->tes.output_usage_mask);
			break;
		case MESA_SHADER_TESS_CTRL: {
			unsigned param = shader_io_get_unique_index(idx);
			const struct glsl_type *type = var->type;

			if (!var->data.patch)
				type = glsl_get_array_element(var->type);

			unsigned slots =
				var->data.compact ? DIV_ROUND_UP(var->data.location_frac + glsl_get_length(type), 4)
						  : glsl_count_attribute_slots(type, false);

			mark_tess_output(info, var->data.patch, param, slots);
			break;
		}
		default:
			break;
		}
	}
}

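/* Track how push constants are accessed. A constant offset lets us narrow
 * the [min, max) byte range that is actually read; an indirect offset forces
 * the whole block to be treated as used. Any non-32-bit load clears
 * has_only_32bit_push_constants.
 */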
static void
gather_push_constant_info(const nir_shader *nir,
			  const nir_intrinsic_instr *instr,
			  struct radv_shader_info *info)
{
	int base = nir_intrinsic_base(instr);

	if (!nir_src_is_const(instr->src[0])) {
		info->has_indirect_push_constants = true;
	} else {
		uint32_t min = base + nir_src_as_uint(instr->src[0]);
		uint32_t max = min + instr->num_components * 4;

		info->max_push_constant_used =
			MAX2(max, info->max_push_constant_used);
		info->min_push_constant_used =
			MIN2(min, info->min_push_constant_used);
	}

	if (instr->dest.ssa.bit_size != 32)
		info->has_only_32bit_push_constants = false;

	info->loads_push_constants = true;
}

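/* Main per-intrinsic gathering: flags system-value usage (draw/instance ID,
 * workgroup IDs, sample ID, view index, ...), records descriptor-set and
 * image/SSBO usage, and dispatches to the load/store deref helpers above.
 */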
static void
gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
		      struct radv_shader_info *info)
{
	switch (instr->intrinsic) {
	case nir_intrinsic_load_barycentric_at_sample:
		info->ps.needs_sample_positions = true;
		break;
	case nir_intrinsic_load_draw_id:
		info->vs.needs_draw_id = true;
		break;
	case nir_intrinsic_load_instance_id:
		info->vs.needs_instance_id = true;
		break;
	case nir_intrinsic_load_num_work_groups:
		info->cs.uses_grid_size = true;
		break;
	case nir_intrinsic_load_local_invocation_id:
	case nir_intrinsic_load_work_group_id: {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
		while (mask) {
			unsigned i = u_bit_scan(&mask);

			if (instr->intrinsic == nir_intrinsic_load_work_group_id)
				info->cs.uses_block_id[i] = true;
			else
				info->cs.uses_thread_id[i] = true;
		}
		break;
	}
	case nir_intrinsic_load_local_invocation_index:
	case nir_intrinsic_load_subgroup_id:
	case nir_intrinsic_load_num_subgroups:
		info->cs.uses_local_invocation_idx = true;
		break;
	case nir_intrinsic_load_sample_id:
		info->ps.force_persample = true;
		break;
	case nir_intrinsic_load_sample_pos:
		info->ps.force_persample = true;
		break;
	case nir_intrinsic_load_view_index:
		info->needs_multiview_view_index = true;
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			info->ps.layer_input = true;
		break;
	case nir_intrinsic_load_layer_id:
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			info->ps.layer_input = true;
		break;
	case nir_intrinsic_load_invocation_id:
		info->uses_invocation_id = true;
		break;
	case nir_intrinsic_load_primitive_id:
		info->uses_prim_id = true;
		break;
	case nir_intrinsic_load_push_constant:
		gather_push_constant_info(nir, instr, info);
		break;
	case nir_intrinsic_vulkan_resource_index:
		info->desc_set_used_mask |= (1 << nir_intrinsic_desc_set(instr));
		break;
	case nir_intrinsic_image_deref_load:
	case nir_intrinsic_image_deref_store:
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_min:
	case nir_intrinsic_image_deref_atomic_max:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
	case nir_intrinsic_image_deref_size: {
		nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
		mark_sampler_desc(var, info);

		if (instr->intrinsic == nir_intrinsic_image_deref_store ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_add ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_min ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_max ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_and ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_or ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_xor ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_exchange ||
		    instr->intrinsic == nir_intrinsic_image_deref_atomic_comp_swap) {
			if (nir->info.stage == MESA_SHADER_FRAGMENT)
				info->ps.writes_memory = true;
		}
		break;
	}
	case nir_intrinsic_store_ssbo:
	case nir_intrinsic_ssbo_atomic_add:
	case nir_intrinsic_ssbo_atomic_imin:
	case nir_intrinsic_ssbo_atomic_umin:
	case nir_intrinsic_ssbo_atomic_imax:
	case nir_intrinsic_ssbo_atomic_umax:
	case nir_intrinsic_ssbo_atomic_and:
	case nir_intrinsic_ssbo_atomic_or:
	case nir_intrinsic_ssbo_atomic_xor:
	case nir_intrinsic_ssbo_atomic_exchange:
	case nir_intrinsic_ssbo_atomic_comp_swap:
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			info->ps.writes_memory = true;
		break;
	case nir_intrinsic_load_deref:
		gather_intrinsic_load_deref_info(nir, instr, info);
		break;
	case nir_intrinsic_store_deref:
		gather_intrinsic_store_deref_info(nir, instr, info);
		break;
	default:
		break;
	}
}

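/* Record the descriptor sets referenced by the texture and sampler derefs
 * of a texture instruction.
 */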
static void
gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr,
		struct radv_shader_info *info)
{
	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
			mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
			break;
		case nir_tex_src_sampler_deref:
			mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
			break;
		default:
			break;
		}
	}
}

static void
gather_info_block(const nir_shader *nir, const nir_block *block,
		  struct radv_shader_info *info)
{
	nir_foreach_instr(instr, block) {
		switch (instr->type) {
		case nir_instr_type_intrinsic:
			gather_intrinsic_info(nir, nir_instr_as_intrinsic(instr), info);
			break;
		case nir_instr_type_tex:
			gather_tex_info(nir, nir_instr_as_tex(instr), info);
			break;
		default:
			break;
		}
	}
}

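/* A VS input in the generic attribute range implies vertex buffers are
 * needed, and any attribute flagged as instance-rate in the pipeline key
 * implies the instance ID is needed.
 */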
static void
gather_info_input_decl_vs(const nir_shader *nir, const nir_variable *var,
			  struct radv_shader_info *info,
			  const struct radv_nir_compiler_options *options)
{
	unsigned attrib_count = glsl_count_attribute_slots(var->type, true);
	int idx = var->data.location;

	if (idx >= VERT_ATTRIB_GENERIC0 && idx <= VERT_ATTRIB_GENERIC15)
		info->vs.has_vertex_buffers = true;

	for (unsigned i = 0; i < attrib_count; ++i) {
		unsigned attrib_index = var->data.location + i - VERT_ATTRIB_GENERIC0;

		if (options->key.vs.instance_rate_inputs & (1u << attrib_index))
			info->vs.needs_instance_id = true;
	}
}

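/* Flag special FS inputs (point coord, primitive ID, layer, clip/cull
 * distances), and force per-sample shading when a float input is declared
 * with sample-rate interpolation.
 */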
static void
gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
			  struct radv_shader_info *info)
{
	unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
	const struct glsl_type *type = glsl_without_array(var->type);
	int idx = var->data.location;

	switch (idx) {
	case VARYING_SLOT_PNTC:
		info->ps.has_pcoord = true;
		break;
	case VARYING_SLOT_PRIMITIVE_ID:
		info->ps.prim_id_input = true;
		break;
	case VARYING_SLOT_LAYER:
		info->ps.layer_input = true;
		break;
	case VARYING_SLOT_CLIP_DIST0:
	case VARYING_SLOT_CLIP_DIST1:
		info->ps.num_input_clips_culls += attrib_count;
		break;
	default:
		break;
	}

	if (glsl_get_base_type(type) == GLSL_TYPE_FLOAT) {
		if (var->data.sample)
			info->ps.force_persample = true;
	}
}

static void
gather_info_input_decl(const nir_shader *nir, const nir_variable *var,
		       struct radv_shader_info *info,
		       const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX:
		gather_info_input_decl_vs(nir, var, info, options);
		break;
	case MESA_SHADER_FRAGMENT:
		gather_info_input_decl_ps(nir, var, info);
		break;
	default:
		break;
	}
}

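/* When the VS is compiled as LS (VS feeding tessellation control), mark its
 * output slots as written; compact variables (clip/cull distances) are sized
 * from their flat array length instead of their attribute slot count.
 */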
static void
gather_info_output_decl_ls(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	int idx = var->data.location;
	unsigned param = shader_io_get_unique_index(idx);
	int num_slots = glsl_count_attribute_slots(var->type, false);
	if (var->data.compact)
		num_slots = DIV_ROUND_UP(var->data.location_frac + glsl_get_length(var->type), 4);
	mark_ls_output(info, param, num_slots);
}

static void
gather_info_output_decl_ps(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	int idx = var->data.location;

	switch (idx) {
	case FRAG_RESULT_DEPTH:
		info->ps.writes_z = true;
		break;
	case FRAG_RESULT_STENCIL:
		info->ps.writes_stencil = true;
		break;
	case FRAG_RESULT_SAMPLE_MASK:
		info->ps.writes_sample_mask = true;
		break;
	default:
		break;
	}
}

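/* Per GS output: bump the highest vertex stream seen, add the output's
 * component count to its stream's total, and remember which stream each
 * output location belongs to.
 */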
static void
gather_info_output_decl_gs(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	unsigned num_components = glsl_get_component_slots(var->type);
	unsigned stream = var->data.stream;
	unsigned idx = var->data.location;

	assert(stream < 4);

	info->gs.max_stream = MAX2(info->gs.max_stream, stream);
	info->gs.num_stream_output_components[stream] += num_components;
	info->gs.output_streams[idx] = stream;
}

static void
gather_info_output_decl(const nir_shader *nir, const nir_variable *var,
			struct radv_shader_info *info,
			const struct radv_nir_compiler_options *options)
{
	switch (nir->info.stage) {
	case MESA_SHADER_FRAGMENT:
		gather_info_output_decl_ps(nir, var, info);
		break;
	case MESA_SHADER_VERTEX:
		if (options->key.vs_common_out.as_ls)
			gather_info_output_decl_ls(nir, var, info);
		break;
	case MESA_SHADER_GEOMETRY:
		gather_info_output_decl_gs(nir, var, info);
		break;
	default:
		break;
	}
}

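/* Translate NIR's transform feedback info into radv_streamout_info. Buffer
 * strides are stored in dwords. enabled_stream_buffers_mask packs one nibble
 * per vertex stream: bit b of nibble s means buffer b is written by stream
 * s, e.g. buffer 2 fed by stream 1 sets (1 << 2) << (1 * 4) = 0x40.
 */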
static void
gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
{
	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
	struct radv_streamout_info *so = &info->so;

	if (!xfb)
		return;

	assert(xfb->output_count < MAX_SO_OUTPUTS);
	so->num_outputs = xfb->output_count;

	for (unsigned i = 0; i < xfb->output_count; i++) {
		struct radv_stream_output *output = &so->outputs[i];

		output->buffer = xfb->outputs[i].buffer;
		output->stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
		output->offset = xfb->outputs[i].offset;
		output->location = xfb->outputs[i].location;
		output->component_mask = xfb->outputs[i].component_mask;

		so->enabled_stream_buffers_mask |=
			(1 << output->buffer) << (output->stream * 4);
	}

	for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
		so->strides[i] = xfb->buffers[i].stride / 4;
	}

	ralloc_free(xfb);
}

void
radv_nir_shader_info_init(struct radv_shader_info *info)
{
	/* Assume that shaders only have 32-bit push constants by default. */
	info->min_push_constant_used = UINT8_MAX;
	info->has_only_32bit_push_constants = true;
}

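/* Entry point: walk the shader's input/output variables and every
 * instruction of its entry function to fill in *info. Dynamic descriptor
 * offsets are fetched through the push constant path, so using them implies
 * loads_push_constants as well.
 */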
void
radv_nir_shader_info_pass(const struct nir_shader *nir,
			  const struct radv_nir_compiler_options *options,
			  struct radv_shader_info *info)
{
	struct nir_function *func =
		(struct nir_function *)exec_list_get_head_const(&nir->functions);

	if (options->layout && options->layout->dynamic_offset_count &&
	    (options->layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
		info->loads_push_constants = true;
		info->loads_dynamic_offsets = true;
	}

	nir_foreach_variable(variable, &nir->inputs)
		gather_info_input_decl(nir, variable, info, options);

	nir_foreach_block(block, func->impl) {
		gather_info_block(nir, block, info);
	}

	nir_foreach_variable(variable, &nir->outputs)
		gather_info_output_decl(nir, variable, info, options);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_TESS_EVAL ||
	    nir->info.stage == MESA_SHADER_GEOMETRY)
		gather_xfb_info(nir, info);

	/* Make sure to export the LayerID if the fragment shader needs it. */
	if (options->key.vs_common_out.export_layer_id) {
		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			info->vs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		case MESA_SHADER_TESS_EVAL:
			info->tes.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		case MESA_SHADER_GEOMETRY:
			info->gs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		default:
			break;
		}
	}
}