/*
 * Copyright © 2017 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_shader.h"
#include "nir/nir.h"
#include "nir/nir_deref.h"
#include "nir/nir_xfb_info.h"

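/* Mark the descriptor set that contains this image/sampler variable as
 * used by the shader. */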
static void mark_sampler_desc(const nir_variable *var,
			      struct radv_shader_info *info)
{
	info->desc_set_used_mask |= (1 << var->data.descriptor_set);
}

static void mark_ls_output(struct radv_shader_info *info,
			   uint32_t param, int num_slots)
{
	uint64_t mask = (1ull << num_slots) - 1ull;
	info->vs.ls_outputs_written |= (mask << param);
}

static void mark_tess_output(struct radv_shader_info *info,
			     bool is_patch, uint32_t param, int num_slots)
{
	uint64_t mask = (1ull << num_slots) - 1ull;
	if (is_patch)
		info->tcs.patch_outputs_written |= (mask << param);
	else
		info->tcs.outputs_written |= (mask << param);
}

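/* Compute the constant part of a deref chain's offset, in attribute
 * slots, relative to the base variable. Indirect (non-constant) array
 * indices contribute nothing to the result. */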
static void
get_deref_offset(nir_deref_instr *instr,
		 unsigned *const_out)
{
	nir_variable *var = nir_deref_instr_get_variable(instr);
	nir_deref_path path;
	unsigned idx_lvl = 1;

	if (var->data.compact) {
		assert(instr->deref_type == nir_deref_type_array);
		*const_out = nir_src_as_uint(instr->arr.index);
		return;
	}

	nir_deref_path_init(&path, instr, NULL);

	uint32_t const_offset = 0;

	for (; path.path[idx_lvl]; ++idx_lvl) {
		const struct glsl_type *parent_type = path.path[idx_lvl - 1]->type;
		if (path.path[idx_lvl]->deref_type == nir_deref_type_struct) {
			unsigned index = path.path[idx_lvl]->strct.index;

			for (unsigned i = 0; i < index; i++) {
				const struct glsl_type *ft = glsl_get_struct_field(parent_type, i);
				const_offset += glsl_count_attribute_slots(ft, false);
			}
		} else if (path.path[idx_lvl]->deref_type == nir_deref_type_array) {
			unsigned size = glsl_count_attribute_slots(path.path[idx_lvl]->type, false);
			if (nir_src_is_const(path.path[idx_lvl]->arr.index))
				const_offset += nir_src_as_uint(path.path[idx_lvl]->arr.index) * size;
		} else
			unreachable("Unhandled deref type in get_deref_offset");
	}

	*const_out = const_offset;

	nir_deref_path_finish(&path);
}

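/* Record which components of a vertex shader input are actually read. */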
static void
gather_intrinsic_load_deref_info(const nir_shader *nir,
				 const nir_intrinsic_instr *instr,
				 struct radv_shader_info *info)
{
	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX: {
		nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

		if (var && var->data.mode == nir_var_shader_in) {
			unsigned idx = var->data.location;
			uint8_t mask = nir_ssa_def_components_read(&instr->dest.ssa);

			info->vs.input_usage_mask[idx] |=
				mask << var->data.location_frac;
		}
		break;
	}
	default:
		break;
	}
}

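/* Widen a 4-component write mask for 64-bit values: each component
 * occupies two 32-bit slots, so every set bit expands to a pair of bits. */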
static uint32_t
widen_writemask(uint32_t wrmask)
{
	uint32_t new_wrmask = 0;
	for (unsigned i = 0; i < 4; i++)
		new_wrmask |= (wrmask & (1 << i) ? 0x3 : 0x0) << (i * 2);
	return new_wrmask;
}

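/* Accumulate the components written by a store_deref into the per-slot
 * output usage mask, accounting for the deref's constant offset and for
 * 64-bit types. */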
static void
set_output_usage_mask(const nir_shader *nir, const nir_intrinsic_instr *instr,
		      uint8_t *output_usage_mask)
{
	nir_deref_instr *deref_instr =
		nir_instr_as_deref(instr->src[0].ssa->parent_instr);
	nir_variable *var = nir_deref_instr_get_variable(deref_instr);
	unsigned attrib_count = glsl_count_attribute_slots(deref_instr->type, false);
	unsigned idx = var->data.location;
	unsigned comp = var->data.location_frac;
	unsigned const_offset = 0;

	get_deref_offset(deref_instr, &const_offset);

	if (var->data.compact) {
		assert(!glsl_type_is_64bit(deref_instr->type));
		const_offset += comp;
		output_usage_mask[idx + const_offset / 4] |= 1 << (const_offset % 4);
		return;
	}

	uint32_t wrmask = nir_intrinsic_write_mask(instr);
	if (glsl_type_is_64bit(deref_instr->type))
		wrmask = widen_writemask(wrmask);

	for (unsigned i = 0; i < attrib_count; i++)
		output_usage_mask[idx + i + const_offset] |=
			((wrmask >> (i * 4)) & 0xf) << comp;
}

static void
set_writes_memory(const nir_shader *nir, struct radv_shader_info *info)
{
	if (nir->info.stage == MESA_SHADER_FRAGMENT)
		info->ps.writes_memory = true;
	else if (nir->info.stage == MESA_SHADER_GEOMETRY)
		info->gs.writes_memory = true;
}

static void
gather_intrinsic_store_deref_info(const nir_shader *nir,
				  const nir_intrinsic_instr *instr,
				  struct radv_shader_info *info)
{
	nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));

	if (var && var->data.mode == nir_var_shader_out) {
		unsigned idx = var->data.location;

		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			set_output_usage_mask(nir, instr,
					      info->vs.output_usage_mask);
			break;
		case MESA_SHADER_GEOMETRY:
			set_output_usage_mask(nir, instr,
					      info->gs.output_usage_mask);
			break;
		case MESA_SHADER_TESS_EVAL:
			set_output_usage_mask(nir, instr,
					      info->tes.output_usage_mask);
			break;
		case MESA_SHADER_TESS_CTRL: {
			unsigned param = shader_io_get_unique_index(idx);
			const struct glsl_type *type = var->type;

			if (!var->data.patch)
				type = glsl_get_array_element(var->type);

			unsigned slots =
				var->data.compact ? DIV_ROUND_UP(var->data.location_frac + glsl_get_length(type), 4)
						  : glsl_count_attribute_slots(type, false);

			mark_tess_output(info, var->data.patch, param, slots);
			break;
		}
		default:
			break;
		}
	}
}

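/* Track push constant usage: the byte range accessed, whether any access
 * uses a non-constant offset, and whether all loads are 32-bit. */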
static void
gather_push_constant_info(const nir_shader *nir,
			  const nir_intrinsic_instr *instr,
			  struct radv_shader_info *info)
{
	int base = nir_intrinsic_base(instr);

	if (!nir_src_is_const(instr->src[0])) {
		info->has_indirect_push_constants = true;
	} else {
		uint32_t min = base + nir_src_as_uint(instr->src[0]);
		uint32_t max = min + instr->num_components * 4;

		info->max_push_constant_used =
			MAX2(max, info->max_push_constant_used);
		info->min_push_constant_used =
			MIN2(min, info->min_push_constant_used);
	}

	if (instr->dest.ssa.bit_size != 32)
		info->has_only_32bit_push_constants = false;

	info->loads_push_constants = true;
}

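/* Main intrinsic walk: derive system value usage, descriptor usage and
 * memory-write flags from each intrinsic instruction. */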
static void
gather_intrinsic_info(const nir_shader *nir, const nir_intrinsic_instr *instr,
		      struct radv_shader_info *info)
{
	switch (instr->intrinsic) {
	case nir_intrinsic_load_barycentric_at_sample:
		info->ps.needs_sample_positions = true;
		break;
	case nir_intrinsic_load_draw_id:
		info->vs.needs_draw_id = true;
		break;
	case nir_intrinsic_load_instance_id:
		info->vs.needs_instance_id = true;
		break;
	case nir_intrinsic_load_num_work_groups:
		info->cs.uses_grid_size = true;
		break;
	case nir_intrinsic_load_local_invocation_id:
	case nir_intrinsic_load_work_group_id: {
		unsigned mask = nir_ssa_def_components_read(&instr->dest.ssa);
		while (mask) {
			unsigned i = u_bit_scan(&mask);

			if (instr->intrinsic == nir_intrinsic_load_work_group_id)
				info->cs.uses_block_id[i] = true;
			else
				info->cs.uses_thread_id[i] = true;
		}
		break;
	}
	case nir_intrinsic_load_local_invocation_index:
	case nir_intrinsic_load_subgroup_id:
	case nir_intrinsic_load_num_subgroups:
		info->cs.uses_local_invocation_idx = true;
		break;
	case nir_intrinsic_load_sample_id:
	case nir_intrinsic_load_sample_pos:
		info->ps.force_persample = true;
		break;
	case nir_intrinsic_load_view_index:
		info->needs_multiview_view_index = true;
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			info->ps.layer_input = true;
		break;
	case nir_intrinsic_load_layer_id:
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			info->ps.layer_input = true;
		break;
	case nir_intrinsic_load_invocation_id:
		info->uses_invocation_id = true;
		break;
	case nir_intrinsic_load_primitive_id:
		info->uses_prim_id = true;
		break;
	case nir_intrinsic_load_push_constant:
		gather_push_constant_info(nir, instr, info);
		break;
	case nir_intrinsic_vulkan_resource_index:
		info->desc_set_used_mask |= (1 << nir_intrinsic_desc_set(instr));
		break;
	case nir_intrinsic_image_deref_load:
	case nir_intrinsic_image_deref_store:
	case nir_intrinsic_image_deref_atomic_add:
	case nir_intrinsic_image_deref_atomic_imin:
	case nir_intrinsic_image_deref_atomic_umin:
	case nir_intrinsic_image_deref_atomic_imax:
	case nir_intrinsic_image_deref_atomic_umax:
	case nir_intrinsic_image_deref_atomic_and:
	case nir_intrinsic_image_deref_atomic_or:
	case nir_intrinsic_image_deref_atomic_xor:
	case nir_intrinsic_image_deref_atomic_exchange:
	case nir_intrinsic_image_deref_atomic_comp_swap:
	case nir_intrinsic_image_deref_size: {
		nir_variable *var = nir_deref_instr_get_variable(nir_instr_as_deref(instr->src[0].ssa->parent_instr));
		mark_sampler_desc(var, info);

		/* Everything except plain loads and size queries writes memory. */
		if (instr->intrinsic != nir_intrinsic_image_deref_load &&
		    instr->intrinsic != nir_intrinsic_image_deref_size) {
			set_writes_memory(nir, info);
		}
		break;
	}
	case nir_intrinsic_store_ssbo:
	case nir_intrinsic_ssbo_atomic_add:
	case nir_intrinsic_ssbo_atomic_imin:
	case nir_intrinsic_ssbo_atomic_umin:
	case nir_intrinsic_ssbo_atomic_imax:
	case nir_intrinsic_ssbo_atomic_umax:
	case nir_intrinsic_ssbo_atomic_and:
	case nir_intrinsic_ssbo_atomic_or:
	case nir_intrinsic_ssbo_atomic_xor:
	case nir_intrinsic_ssbo_atomic_exchange:
	case nir_intrinsic_ssbo_atomic_comp_swap:
		set_writes_memory(nir, info);
		break;
	case nir_intrinsic_load_deref:
		gather_intrinsic_load_deref_info(nir, instr, info);
		break;
	case nir_intrinsic_store_deref:
		gather_intrinsic_store_deref_info(nir, instr, info);
		/* fallthrough */
	case nir_intrinsic_deref_atomic_add:
	case nir_intrinsic_deref_atomic_imin:
	case nir_intrinsic_deref_atomic_umin:
	case nir_intrinsic_deref_atomic_imax:
	case nir_intrinsic_deref_atomic_umax:
	case nir_intrinsic_deref_atomic_and:
	case nir_intrinsic_deref_atomic_or:
	case nir_intrinsic_deref_atomic_xor:
	case nir_intrinsic_deref_atomic_exchange:
	case nir_intrinsic_deref_atomic_comp_swap: {
		if (nir_src_as_deref(instr->src[0])->mode & (nir_var_mem_global | nir_var_mem_ssbo))
			set_writes_memory(nir, info);
		break;
	}
	default:
		break;
	}
}

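/* Mark the descriptor sets referenced by a texture instruction's texture
 * and sampler derefs as used. */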
static void
gather_tex_info(const nir_shader *nir, const nir_tex_instr *instr,
		struct radv_shader_info *info)
{
	for (unsigned i = 0; i < instr->num_srcs; i++) {
		switch (instr->src[i].src_type) {
		case nir_tex_src_texture_deref:
		case nir_tex_src_sampler_deref:
			mark_sampler_desc(nir_deref_instr_get_variable(nir_src_as_deref(instr->src[i].src)), info);
			break;
		default:
			break;
		}
	}
}

static void
gather_info_block(const nir_shader *nir, const nir_block *block,
		  struct radv_shader_info *info)
{
	nir_foreach_instr(instr, block) {
		switch (instr->type) {
		case nir_instr_type_intrinsic:
			gather_intrinsic_info(nir, nir_instr_as_intrinsic(instr), info);
			break;
		case nir_instr_type_tex:
			gather_tex_info(nir, nir_instr_as_tex(instr), info);
			break;
		default:
			break;
		}
	}
}

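/* Gather info from a vertex shader input declaration: whether vertex
 * buffers are needed and whether any input is instance-rate (which
 * requires the instance ID). */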
static void
gather_info_input_decl_vs(const nir_shader *nir, const nir_variable *var,
			  struct radv_shader_info *info,
			  const struct radv_shader_variant_key *key)
{
	unsigned attrib_count = glsl_count_attribute_slots(var->type, true);
	int idx = var->data.location;

	if (idx >= VERT_ATTRIB_GENERIC0 && idx <= VERT_ATTRIB_GENERIC15)
		info->vs.has_vertex_buffers = true;

	for (unsigned i = 0; i < attrib_count; ++i) {
		unsigned attrib_index = var->data.location + i - VERT_ATTRIB_GENERIC0;

		if (key->vs.instance_rate_inputs & (1u << attrib_index))
			info->vs.needs_instance_id = true;
	}
}

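/* Recursively walk a fragment shader input type and set a bit in
 * float16_shaded_mask for every attribute slot that holds 16-bit data. */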
static void
mark_16bit_ps_input(struct radv_shader_info *info, const struct glsl_type *type,
		    int location)
{
	if (glsl_type_is_scalar(type) || glsl_type_is_vector(type) || glsl_type_is_matrix(type)) {
		unsigned attrib_count = glsl_count_attribute_slots(type, false);
		if (glsl_type_is_16bit(type)) {
			info->ps.float16_shaded_mask |= ((1ull << attrib_count) - 1) << location;
		}
	} else if (glsl_type_is_array(type)) {
		unsigned stride = glsl_count_attribute_slots(glsl_get_array_element(type), false);
		for (unsigned i = 0; i < glsl_get_length(type); ++i) {
			mark_16bit_ps_input(info, glsl_get_array_element(type), location + i * stride);
		}
	} else {
		assert(glsl_type_is_struct_or_ifc(type));
		for (unsigned i = 0; i < glsl_get_length(type); i++) {
			mark_16bit_ps_input(info, glsl_get_struct_field(type, i), location);
			location += glsl_count_attribute_slots(glsl_get_struct_field(type, i), false);
		}
	}
}

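/* Gather info from a fragment shader input declaration: special varyings,
 * sample-rate shading, interpolation modes and the overall input mask. */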
static void
gather_info_input_decl_ps(const nir_shader *nir, const nir_variable *var,
			  struct radv_shader_info *info)
{
	unsigned attrib_count = glsl_count_attribute_slots(var->type, false);
	const struct glsl_type *type = glsl_without_array(var->type);
	int idx = var->data.location;

	switch (idx) {
	case VARYING_SLOT_PNTC:
		info->ps.has_pcoord = true;
		break;
	case VARYING_SLOT_PRIMITIVE_ID:
		info->ps.prim_id_input = true;
		break;
	case VARYING_SLOT_LAYER:
		info->ps.layer_input = true;
		break;
	case VARYING_SLOT_CLIP_DIST0:
	case VARYING_SLOT_CLIP_DIST1:
		info->ps.num_input_clips_culls += attrib_count;
		break;
	case VARYING_SLOT_VIEWPORT:
		info->ps.viewport_index_input = true;
		break;
	default:
		break;
	}

	if (glsl_get_base_type(type) == GLSL_TYPE_FLOAT) {
		if (var->data.sample)
			info->ps.force_persample = true;
	}

	if (var->data.compact) {
		unsigned component_count = var->data.location_frac +
					   glsl_get_length(var->type);
		attrib_count = (component_count + 3) / 4;
	} else {
		mark_16bit_ps_input(info, var->type, var->data.driver_location);
	}

	uint64_t mask = ((1ull << attrib_count) - 1);

	if (var->data.interpolation == INTERP_MODE_FLAT)
		info->ps.flat_shaded_mask |= mask << var->data.driver_location;
	if (var->data.interpolation == INTERP_MODE_EXPLICIT)
		info->ps.explicit_shaded_mask |= mask << var->data.driver_location;

	if (var->data.location >= VARYING_SLOT_VAR0)
		info->ps.input_mask |= mask << (var->data.location - VARYING_SLOT_VAR0);
}

static void
gather_info_input_decl(const nir_shader *nir, const nir_variable *var,
		       struct radv_shader_info *info,
		       const struct radv_shader_variant_key *key)
{
	switch (nir->info.stage) {
	case MESA_SHADER_VERTEX:
		gather_info_input_decl_vs(nir, var, info, key);
		break;
	case MESA_SHADER_FRAGMENT:
		gather_info_input_decl_ps(nir, var, info);
		break;
	default:
		break;
	}
}

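/* For a vertex shader output in the LS stage (VS feeding the tessellation
 * control shader), mark the slots it occupies in ls_outputs_written. */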
static void
gather_info_output_decl_ls(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	int idx = var->data.location;
	unsigned param = shader_io_get_unique_index(idx);
	int num_slots = glsl_count_attribute_slots(var->type, false);
	if (var->data.compact)
		num_slots = DIV_ROUND_UP(var->data.location_frac + glsl_get_length(var->type), 4);
	mark_ls_output(info, param, num_slots);
}

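/* Gather info from a fragment shader output declaration: depth, stencil
 * and sample-mask writes, plus the color buffer shader mask. */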
static void
gather_info_output_decl_ps(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	int idx = var->data.location;

	switch (idx) {
	case FRAG_RESULT_DEPTH:
		info->ps.writes_z = true;
		break;
	case FRAG_RESULT_STENCIL:
		info->ps.writes_stencil = true;
		break;
	case FRAG_RESULT_SAMPLE_MASK:
		info->ps.writes_sample_mask = true;
		break;
	default:
		break;
	}

	if (idx >= FRAG_RESULT_DATA0 && idx <= FRAG_RESULT_DATA7) {
		unsigned num_components = glsl_get_component_slots(glsl_without_array(var->type));
		unsigned num_slots = glsl_count_attribute_slots(var->type, false);
		unsigned write_mask = (1 << num_components) - 1;
		unsigned slot = idx - FRAG_RESULT_DATA0;

		for (unsigned i = 0; i < num_slots; i++) {
			info->ps.cb_shader_mask |= write_mask << ((slot + i) * 4);
		}
	}
}

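/* Gather per-stream output component counts; used for geometry shaders
 * and for vertex shaders compiled as NGG. */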
static void
gather_info_output_decl_gs(const nir_shader *nir, const nir_variable *var,
			   struct radv_shader_info *info)
{
	unsigned num_components = glsl_get_component_slots(var->type);
	unsigned stream = var->data.stream;
	unsigned idx = var->data.location;

	assert(stream < 4);

	info->gs.max_stream = MAX2(info->gs.max_stream, stream);
	info->gs.num_stream_output_components[stream] += num_components;
	info->gs.output_streams[idx] = stream;
}

static void
gather_info_output_decl(const nir_shader *nir, const nir_variable *var,
			struct radv_shader_info *info,
			const struct radv_shader_variant_key *key)
{
	struct radv_vs_output_info *vs_info = NULL;

	switch (nir->info.stage) {
	case MESA_SHADER_FRAGMENT:
		gather_info_output_decl_ps(nir, var, info);
		break;
	case MESA_SHADER_VERTEX:
		if (!key->vs_common_out.as_ls &&
		    !key->vs_common_out.as_es)
			vs_info = &info->vs.outinfo;

		if (key->vs_common_out.as_ls)
			gather_info_output_decl_ls(nir, var, info);
		else if (key->vs_common_out.as_ngg)
			gather_info_output_decl_gs(nir, var, info);
		break;
	case MESA_SHADER_GEOMETRY:
		vs_info = &info->vs.outinfo;
		gather_info_output_decl_gs(nir, var, info);
		break;
	case MESA_SHADER_TESS_EVAL:
		if (!key->vs_common_out.as_es)
			vs_info = &info->tes.outinfo;
		break;
	default:
		break;
	}

	if (vs_info) {
		switch (var->data.location) {
		case VARYING_SLOT_CLIP_DIST0:
			vs_info->clip_dist_mask =
				(1 << nir->info.clip_distance_array_size) - 1;
			vs_info->cull_dist_mask =
				(1 << nir->info.cull_distance_array_size) - 1;
			vs_info->cull_dist_mask <<= nir->info.clip_distance_array_size;
			break;
		case VARYING_SLOT_PSIZ:
			vs_info->writes_pointsize = true;
			break;
		case VARYING_SLOT_VIEWPORT:
			vs_info->writes_viewport_index = true;
			break;
		case VARYING_SLOT_LAYER:
			vs_info->writes_layer = true;
			break;
		default:
			break;
		}
	}
}

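/* Translate NIR transform feedback info into the radv_streamout_info
 * layout used by the rest of the driver. */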
static void
gather_xfb_info(const nir_shader *nir, struct radv_shader_info *info)
{
	nir_xfb_info *xfb = nir_gather_xfb_info(nir, NULL);
	struct radv_streamout_info *so = &info->so;

	if (!xfb)
		return;

	assert(xfb->output_count < MAX_SO_OUTPUTS);
	so->num_outputs = xfb->output_count;

	for (unsigned i = 0; i < xfb->output_count; i++) {
		struct radv_stream_output *output = &so->outputs[i];

		output->buffer = xfb->outputs[i].buffer;
		output->stream = xfb->buffer_to_stream[xfb->outputs[i].buffer];
		output->offset = xfb->outputs[i].offset;
		output->location = xfb->outputs[i].location;
		output->component_mask = xfb->outputs[i].component_mask;

		so->enabled_stream_buffers_mask |=
			(1 << output->buffer) << (output->stream * 4);
	}

	for (unsigned i = 0; i < NIR_MAX_XFB_BUFFERS; i++) {
		so->strides[i] = xfb->buffers[i].stride / 4;
	}

	ralloc_free(xfb);
}

void
radv_nir_shader_info_init(struct radv_shader_info *info)
{
	/* Assume that shaders only have 32-bit push constants by default. */
	info->min_push_constant_used = UINT8_MAX;
	info->has_only_32bit_push_constants = true;
}

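/* Entry point: walk the shader's declarations and instructions and fill
 * in the radv_shader_info structure consumed by the compiler backends. */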
void
radv_nir_shader_info_pass(const struct nir_shader *nir,
			  const struct radv_pipeline_layout *layout,
			  const struct radv_shader_variant_key *key,
			  struct radv_shader_info *info,
			  bool use_llvm)
{
	struct nir_function *func =
		(struct nir_function *)exec_list_get_head_const(&nir->functions);

	if (layout && layout->dynamic_offset_count &&
	    (layout->dynamic_shader_stages & mesa_to_vk_shader_stage(nir->info.stage))) {
		info->loads_push_constants = true;
		info->loads_dynamic_offsets = true;
	}

	nir_foreach_variable(variable, &nir->inputs)
		gather_info_input_decl(nir, variable, info, key);

	nir_foreach_block(block, func->impl) {
		gather_info_block(nir, block, info);
	}

	nir_foreach_variable(variable, &nir->outputs)
		gather_info_output_decl(nir, variable, info, key);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_TESS_EVAL ||
	    nir->info.stage == MESA_SHADER_GEOMETRY)
		gather_xfb_info(nir, info);

	/* Make sure to export the LayerID if the fragment shader needs it. */
	if (key->vs_common_out.export_layer_id) {
		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			info->vs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		case MESA_SHADER_TESS_EVAL:
			info->tes.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		case MESA_SHADER_GEOMETRY:
			info->gs.output_usage_mask[VARYING_SLOT_LAYER] |= 0x1;
			break;
		default:
			break;
		}
	}

	/* Make sure to export the LayerID if the subpass uses multiview. */
	if (key->has_multiview_view_index) {
		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			info->vs.outinfo.writes_layer = true;
			break;
		case MESA_SHADER_TESS_EVAL:
			info->tes.outinfo.writes_layer = true;
			break;
		case MESA_SHADER_GEOMETRY:
			info->vs.outinfo.writes_layer = true;
			break;
		default:
			break;
		}
	}

	/* Make sure to export the PrimitiveID if the fragment shader needs it. */
	if (key->vs_common_out.export_prim_id) {
		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			info->vs.outinfo.export_prim_id = true;
			break;
		case MESA_SHADER_TESS_EVAL:
			info->tes.outinfo.export_prim_id = true;
			break;
		case MESA_SHADER_GEOMETRY:
			info->vs.outinfo.export_prim_id = true;
			break;
		default:
			break;
		}
	}

	/* Make sure to export the ViewportIndex if the fragment shader needs it. */
	if (key->vs_common_out.export_viewport_index) {
		switch (nir->info.stage) {
		case MESA_SHADER_VERTEX:
			info->vs.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
			break;
		case MESA_SHADER_TESS_EVAL:
			info->tes.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
			break;
		case MESA_SHADER_GEOMETRY:
			info->gs.output_usage_mask[VARYING_SLOT_VIEWPORT] |= 0x1;
			break;
		default:
			break;
		}
	}

	if (nir->info.stage == MESA_SHADER_FRAGMENT)
		info->ps.num_interp = nir->num_inputs;

	switch (nir->info.stage) {
	case MESA_SHADER_COMPUTE:
		for (int i = 0; i < 3; ++i)
			info->cs.block_size[i] = nir->info.cs.local_size[i];
		break;
	case MESA_SHADER_FRAGMENT:
		info->ps.can_discard = nir->info.fs.uses_discard;
		info->ps.early_fragment_test = nir->info.fs.early_fragment_tests;
		info->ps.post_depth_coverage = nir->info.fs.post_depth_coverage;
		info->ps.depth_layout = nir->info.fs.depth_layout;
		break;
	case MESA_SHADER_GEOMETRY:
		info->gs.vertices_in = nir->info.gs.vertices_in;
		info->gs.vertices_out = nir->info.gs.vertices_out;
		info->gs.output_prim = nir->info.gs.output_primitive;
		info->gs.invocations = nir->info.gs.invocations;
		break;
	case MESA_SHADER_TESS_EVAL:
		info->tes.primitive_mode = nir->info.tess.primitive_mode;
		info->tes.spacing = nir->info.tess.spacing;
		info->tes.ccw = nir->info.tess.ccw;
		info->tes.point_mode = nir->info.tess.point_mode;
		info->tes.as_es = key->vs_common_out.as_es;
		info->tes.export_prim_id = key->vs_common_out.export_prim_id;
		info->is_ngg = key->vs_common_out.as_ngg;
		info->is_ngg_passthrough = key->vs_common_out.as_ngg_passthrough;
		break;
	case MESA_SHADER_TESS_CTRL:
		info->tcs.tcs_vertices_out = nir->info.tess.tcs_vertices_out;
		break;
	case MESA_SHADER_VERTEX:
		info->vs.as_es = key->vs_common_out.as_es;
		info->vs.as_ls = key->vs_common_out.as_ls;
		info->vs.export_prim_id = key->vs_common_out.export_prim_id;
		info->is_ngg = key->vs_common_out.as_ngg;
		info->is_ngg_passthrough = key->vs_common_out.as_ngg_passthrough;
		break;
	default:
		break;
	}

	if (nir->info.stage == MESA_SHADER_GEOMETRY) {
		unsigned add_clip = nir->info.clip_distance_array_size +
				    nir->info.cull_distance_array_size > 4;
		info->gs.gsvs_vertex_size =
			(util_bitcount64(nir->info.outputs_written) + add_clip) * 16;
		info->gs.max_gsvs_emit_size =
			info->gs.gsvs_vertex_size * nir->info.gs.vertices_out;
	}

	/* Compute the ESGS item size for VS or TES as ES. */
	if ((nir->info.stage == MESA_SHADER_VERTEX ||
	     nir->info.stage == MESA_SHADER_TESS_EVAL) &&
	    key->vs_common_out.as_es) {
		struct radv_es_output_info *es_info =
			nir->info.stage == MESA_SHADER_VERTEX ? &info->vs.es_info : &info->tes.es_info;

		if (use_llvm) {
			/* The outputs may contain gaps, use the highest output index + 1. */
			uint32_t max_output_written = 0;
			uint64_t output_mask = nir->info.outputs_written;

			while (output_mask) {
				const int i = u_bit_scan64(&output_mask);
				unsigned param_index = shader_io_get_unique_index(i);

				max_output_written = MAX2(param_index, max_output_written);
			}
			es_info->esgs_itemsize = (max_output_written + 1) * 16;
		} else {
			/* The outputs don't contain gaps, so we can use the number of outputs. */
			uint32_t num_outputs_written = nir->info.stage == MESA_SHADER_VERTEX
							       ? info->vs.num_linked_outputs
							       : info->tes.num_linked_outputs;
			es_info->esgs_itemsize = num_outputs_written * 16;
		}
	}

	info->float_controls_mode = nir->info.float_controls_execution_mode;

	if (nir->info.stage == MESA_SHADER_FRAGMENT) {
		if (key->fs.is_dual_src) {
			info->ps.cb_shader_mask |= (info->ps.cb_shader_mask & 0xf) << 4;
		}
	}
}