radv: use the base object struct types
src/amd/vulkan/radv_shader.c (mesa.git)
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "radv_shader_args.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"

static const struct nir_shader_compiler_options nir_options_llvm = {
   .vertex_id_zero_based = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_device_index_to_zero = true,
   .lower_fsat = true,
   .lower_fdiv = true,
   .lower_fmod = true,
   .lower_bitfield_insert_to_bitfield_select = true,
   .lower_bitfield_extract = true,
   .lower_sub = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_ffma = true,
   .lower_fpow = true,
   .lower_mul_2x32_64 = true,
   .lower_rotate = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   /* nir_lower_int64() isn't actually called for the LLVM backend, but
    * this helps the loop unrolling heuristics. */
   .lower_int64_options = nir_lower_imul64 |
                          nir_lower_imul_high64 |
                          nir_lower_imul_2x32_64 |
                          nir_lower_divmod64 |
                          nir_lower_minmax64 |
                          nir_lower_iabs64,
};

static const struct nir_shader_compiler_options nir_options_aco = {
   .vertex_id_zero_based = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_device_index_to_zero = true,
   .lower_fdiv = true,
   .lower_fmod = true,
   .lower_bitfield_insert_to_bitfield_select = true,
   .lower_bitfield_extract = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_ffma = true,
   .lower_fpow = true,
   .lower_mul_2x32_64 = true,
   .lower_rotate = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   .lower_int64_options = nir_lower_imul64 |
                          nir_lower_imul_high64 |
                          nir_lower_imul_2x32_64 |
                          nir_lower_divmod64 |
                          nir_lower_logic64 |
                          nir_lower_minmax64 |
                          nir_lower_iabs64,
};
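
/* The two option sets above differ only where the backends differ: the
 * LLVM set additionally sets lower_fsat and lower_sub, while the ACO set
 * additionally sets lower_unpack_half_2x16 and includes nir_lower_logic64
 * in lower_int64_options. Everything else is kept in sync between the two.
 */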

bool
radv_can_dump_shader(struct radv_device *device,
                     struct radv_shader_module *module,
                     bool is_gs_copy_shader)
{
   if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
      return false;
   if (module)
      return !module->nir ||
             (device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);

   return is_gs_copy_shader;
}

bool
radv_can_dump_shader_stats(struct radv_device *device,
                           struct radv_shader_module *module)
{
   /* Only dump non-meta shader stats. */
   return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
          module && !module->nir;
}

VkResult radv_CreateShaderModule(
   VkDevice                        _device,
   const VkShaderModuleCreateInfo* pCreateInfo,
   const VkAllocationCallbacks*    pAllocator,
   VkShaderModule*                 pShaderModule)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->vk.alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &module->base,
                       VK_OBJECT_TYPE_SHADER_MODULE);

   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = radv_shader_module_to_handle(module);

   return VK_SUCCESS;
}
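
/* For reference, a typical application-side call that reaches the entry
 * point above through the Vulkan loader (an illustrative sketch only; the
 * variable names here are hypothetical):
 *
 *    VkShaderModuleCreateInfo info = {
 *       .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 *       .codeSize = code_size_in_bytes,  // SPIR-V size, a multiple of 4
 *       .pCode = spirv_words,
 *    };
 *    VkShaderModule module;
 *    VkResult res = vkCreateShaderModule(device, &info, NULL, &module);
 */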

void radv_DestroyShaderModule(
   VkDevice                     _device,
   VkShaderModule               _module,
   const VkAllocationCallbacks* pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_shader_module, module, _module);

   if (!module)
      return;

   vk_object_base_finish(&module->base);
   vk_free2(&device->vk.alloc, pAllocator, module);
}

void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                  bool allow_copies)
{
   bool progress;
   unsigned lower_flrp =
      (shader->options->lower_flrp16 ? 16 : 0) |
      (shader->options->lower_flrp32 ? 32 : 0) |
      (shader->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;

      NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
      NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

      NIR_PASS_V(shader, nir_lower_vars_to_ssa);
      NIR_PASS_V(shader, nir_lower_pack);

      if (allow_copies) {
         /* Only run this pass in the first call to
          * radv_optimize_nir. Later calls assume that we've
          * lowered away any copy_deref instructions and we
          * don't want to introduce any more.
          */
         NIR_PASS(progress, shader, nir_opt_find_array_copies);
      }

      NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
      NIR_PASS(progress, shader, nir_opt_dead_write_vars);
      NIR_PASS(progress, shader, nir_remove_dead_variables,
               nir_var_function_temp | nir_var_shader_in | nir_var_shader_out);

      NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
      NIR_PASS_V(shader, nir_lower_phis_to_scalar);

      NIR_PASS(progress, shader, nir_copy_prop);
      NIR_PASS(progress, shader, nir_opt_remove_phis);
      NIR_PASS(progress, shader, nir_opt_dce);
      if (nir_opt_trivial_continues(shader)) {
         progress = true;
         NIR_PASS(progress, shader, nir_copy_prop);
         NIR_PASS(progress, shader, nir_opt_remove_phis);
         NIR_PASS(progress, shader, nir_opt_dce);
      }
      NIR_PASS(progress, shader, nir_opt_if, true);
      NIR_PASS(progress, shader, nir_opt_dead_cf);
      NIR_PASS(progress, shader, nir_opt_cse);
      NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
      NIR_PASS(progress, shader, nir_opt_constant_folding);
      NIR_PASS(progress, shader, nir_opt_algebraic);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;
         NIR_PASS(lower_flrp_progress,
                  shader,
                  nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  shader->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, shader,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, shader, nir_opt_undef);
      if (shader->options->max_unroll_iterations) {
         NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
      }
   } while (progress && !optimize_conservatively);

   NIR_PASS(progress, shader, nir_opt_conditional_discard);
   NIR_PASS(progress, shader, nir_opt_shrink_load);
   NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}

static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
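
/* Worked example for shared_var_info(): a vec3 of 32-bit floats has
 * comp_size = 4 and length = 3, giving *size = 12 and *align = 4.
 * Booleans are stored as 32-bit values, so a bvec2 gives *size = 8 and
 * *align = 4.
 */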

nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
                           const char *entrypoint_name,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info,
                           const VkPipelineCreateFlags flags,
                           const struct radv_pipeline_layout *layout,
                           unsigned subgroup_size, unsigned ballot_bit_size)
{
   nir_shader *nir;
   const nir_shader_compiler_options *nir_options =
      device->physical_device->use_aco ? &nir_options_aco :
                                         &nir_options_llvm;

   if (module->nir) {
      /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly. In that case, we ignore the SPIR-V entirely
       * and just use the NIR shader. */
      nir = module->nir;
      nir->options = nir_options;
      nir_validate_shader(nir, "in internal shader");

      assert(exec_list_length(&nir->functions) == 1);
   } else {
      uint32_t *spirv = (uint32_t *) module->data;
      assert(module->size % 4 == 0);

      if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
         radv_print_spirv(module->data, module->size, stderr);

      uint32_t num_spec_entries = 0;
      struct nir_spirv_specialization *spec_entries = NULL;
      if (spec_info && spec_info->mapEntryCount > 0) {
         num_spec_entries = spec_info->mapEntryCount;
         spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
         for (uint32_t i = 0; i < num_spec_entries; i++) {
            VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
            const void *data = spec_info->pData + entry.offset;
            assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
            switch (entry.size) {
            case 8:
               spec_entries[i].value.u64 = *(const uint64_t *)data;
               break;
            case 4:
               spec_entries[i].value.u32 = *(const uint32_t *)data;
               break;
            case 2:
               spec_entries[i].value.u16 = *(const uint16_t *)data;
               break;
            case 1:
               spec_entries[i].value.u8 = *(const uint8_t *)data;
               break;
            default:
               assert(!"Invalid spec constant size");
               break;
            }
         }
      }
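
      /* For reference, application-side data that would exercise the
       * parsing above (an illustrative sketch; constantID 0 is
       * hypothetical). The 4-byte entry takes the "case 4" path and
       * lands in value.u32:
       *
       *    const uint32_t spec_value = 64;
       *    VkSpecializationMapEntry entry = {
       *       .constantID = 0, .offset = 0, .size = sizeof(spec_value),
       *    };
       *    VkSpecializationInfo app_spec_info = {
       *       .mapEntryCount = 1, .pMapEntries = &entry,
       *       .dataSize = sizeof(spec_value), .pData = &spec_value,
       *    };
       */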
      bool int8_int16_enable = !device->physical_device->use_aco ||
                               device->physical_device->rad_info.chip_class >= GFX8;
      const struct spirv_to_nir_options spirv_options = {
         .lower_ubo_ssbo_access_to_offsets = true,
         .caps = {
            .amd_fragment_mask = true,
            .amd_gcn_shader = true,
            .amd_image_read_write_lod = true,
            .amd_shader_ballot = device->physical_device->use_shader_ballot,
            .amd_shader_explicit_vertex_parameter = true,
            .amd_trinary_minmax = true,
            .demote_to_helper_invocation = device->physical_device->use_aco,
            .derivative_group = true,
            .descriptor_array_dynamic_indexing = true,
            .descriptor_array_non_uniform_indexing = true,
            .descriptor_indexing = true,
            .device_group = true,
            .draw_parameters = true,
            .float_controls = true,
            .float16 = device->physical_device->rad_info.has_double_rate_fp16 && !device->physical_device->use_aco,
            .float64 = true,
            .geometry_streams = true,
            .image_ms_array = true,
            .image_read_without_format = true,
            .image_write_without_format = true,
            .int8 = int8_int16_enable,
            .int16 = int8_int16_enable,
            .int64 = true,
            .int64_atomics = true,
            .multiview = true,
            .physical_storage_buffer_address = true,
            .post_depth_coverage = true,
            .runtime_descriptor_array = true,
            .shader_clock = true,
            .shader_viewport_index_layer = true,
            .stencil_export = true,
            .storage_8bit = int8_int16_enable,
            .storage_16bit = int8_int16_enable,
            .storage_image_ms = true,
            .subgroup_arithmetic = true,
            .subgroup_ballot = true,
            .subgroup_basic = true,
            .subgroup_quad = true,
            .subgroup_shuffle = true,
            .subgroup_vote = true,
            .tessellation = true,
            .transform_feedback = true,
            .variable_pointers = true,
         },
         .ubo_addr_format = nir_address_format_32bit_index_offset,
         .ssbo_addr_format = nir_address_format_32bit_index_offset,
         .phys_ssbo_addr_format = nir_address_format_64bit_global,
         .push_const_addr_format = nir_address_format_logical,
         .shared_addr_format = nir_address_format_32bit_offset,
         .frag_coord_is_sysval = true,
      };
      nir = spirv_to_nir(spirv, module->size / 4,
                         spec_entries, num_spec_entries,
                         stage, entrypoint_name,
                         &spirv_options, nir_options);
      assert(nir->info.stage == stage);
      nir_validate_shader(nir, "after spirv_to_nir");

      free(spec_entries);

      /* We have to lower away local constant initializers right before we
       * inline functions. That way they get properly initialized at the top
       * of the function and not at the top of its caller.
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, nir_inline_functions);
      NIR_PASS_V(nir, nir_opt_deref);

      /* Pick off the single entrypoint that we want */
      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
         if (func->is_entrypoint)
            func->name = ralloc_strdup(func, "main");
         else
            exec_node_remove(&func->node);
      }
      assert(exec_list_length(&nir->functions) == 1);

      /* Make sure we lower constant initializers on output variables so that
       * nir_remove_dead_variables below sees the corresponding stores
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);

      /* Now that we've deleted all but the main function, we can go ahead and
       * lower the rest of the constant initializers.
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

      /* Split member structs. We do this before lower_io_to_temporaries so that
       * it doesn't lower system values to temporaries by accident.
       */
      NIR_PASS_V(nir, nir_split_var_copies);
      NIR_PASS_V(nir, nir_split_per_member_structs);

      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          device->physical_device->use_aco)
         NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
      if (nir->info.stage == MESA_SHADER_FRAGMENT)
         NIR_PASS_V(nir, nir_lower_input_attachments, true);

      NIR_PASS_V(nir, nir_remove_dead_variables,
                 nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

      NIR_PASS_V(nir, nir_propagate_invariant);

      NIR_PASS_V(nir, nir_lower_system_values);
      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
      NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
      if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
         NIR_PASS_V(nir, nir_lower_discard_to_demote);
   }

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      nir_lower_gs_intrinsics(nir, true);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_tg4_offsets = true,
   };

   nir_lower_tex(nir, &tex_options);

   nir_lower_vars_to_ssa(nir);

   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY ||
       nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, true);
   } else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   }

   nir_split_var_copies(nir);

   nir_lower_global_vars_to_local(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp);
   bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
   nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
         .subgroup_size = subgroup_size,
         .ballot_bit_size = ballot_bit_size,
         .lower_to_scalar = 1,
         .lower_subgroup_masks = 1,
         .lower_shuffle = 1,
         .lower_shuffle_to_32bit = 1,
         .lower_vote_eq_to_ballot = 1,
         .lower_quad_broadcast_dynamic = 1,
         .lower_quad_broadcast_dynamic_to_const = gfx7minus,
      });
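
   /* A note on the options above: lower_shuffle_to_32bit splits shuffles
    * of 64-bit values into two 32-bit shuffles, lower_vote_eq_to_ballot
    * implements subgroupAllEqual() via ballots, and on GFX7 and older
    * lower_quad_broadcast_dynamic_to_const additionally turns dynamic
    * quad broadcasts into a select over the four constant-index variants,
    * presumably because only the constant form maps to the lane-swizzle
    * operations available there.
    */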

   nir_lower_load_const_to_scalar(nir);

   if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
      radv_optimize_nir(nir, false, true);

   /* We call nir_lower_var_copies() after the first radv_optimize_nir()
    * to remove any copies introduced by nir_opt_find_array_copies().
    */
   nir_lower_var_copies(nir);

   /* Lower deref operations for compute shared memory. */
   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_var_info);
      NIR_PASS_V(nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   /* Lower large variables that are always constant with load_constant
    * intrinsics, which get turned into PC-relative loads from a data
    * section next to the shader.
    */
   NIR_PASS_V(nir, nir_opt_large_constants,
              glsl_get_natural_size_align_bytes, 16);

   /* Indirect lowering must be called after the radv_optimize_nir() loop
    * has been called at least once. Otherwise indirect lowering can
    * bloat the instruction count of the loop and cause it to be
    * considered too large for unrolling.
    */
   ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
   radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

   return nir;
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static nir_variable *
find_layer_in_var(nir_shader *nir)
{
   nir_foreach_variable(var, &nir->inputs) {
      if (var->data.location == VARYING_SLOT_LAYER) {
         return var;
      }
   }

   nir_variable *var =
      nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
   var->data.location = VARYING_SLOT_LAYER;
   var->data.interpolation = INTERP_MODE_FLAT;
   return var;
}

/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The input attachment lowering also needs to know
 * the layer so that it can sample from the correct layer. The code generates
 * a load from the layer_id sysval, but since we don't have a way to get at
 * this information from the fragment shader, we also need to lower this to
 * the gl_Layer varying. This pass lowers both to a varying load from the
 * LAYER slot, before lowering io, so that nir_assign_var_locations() will
 * give the LAYER varying the correct driver_location.
 */

static bool
lower_view_index(nir_shader *nir)
{
   bool progress = false;
   nir_function_impl *entry = nir_shader_get_entrypoint(nir);
   nir_builder b;
   nir_builder_init(&b, entry);

   nir_variable *layer = NULL;
   nir_foreach_block(block, entry) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
         if (load->intrinsic != nir_intrinsic_load_view_index &&
             load->intrinsic != nir_intrinsic_load_layer_id)
            continue;

         if (!layer)
            layer = find_layer_in_var(nir);

         b.cursor = nir_before_instr(instr);
         nir_ssa_def *def = nir_load_var(&b, layer);
         nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                  nir_src_for_ssa(def));

         nir_instr_remove(instr);
         progress = true;
      }
   }

   return progress;
}

void
radv_lower_fs_io(nir_shader *nir)
{
   NIR_PASS_V(nir, lower_view_index);
   nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
                               MESA_SHADER_FRAGMENT);

   NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}


void *
radv_alloc_shader_memory(struct radv_device *device,
                         struct radv_shader_variant *shader)
{
   mtx_lock(&device->shader_slab_mutex);
   list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
      uint64_t offset = 0;
      list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
         if (s->bo_offset - offset >= shader->code_size) {
            shader->bo = slab->bo;
            shader->bo_offset = offset;
            list_addtail(&shader->slab_list, &s->slab_list);
            mtx_unlock(&device->shader_slab_mutex);
            return slab->ptr + offset;
         }
         offset = align_u64(s->bo_offset + s->code_size, 256);
      }
      if (offset <= slab->size && slab->size - offset >= shader->code_size) {
         shader->bo = slab->bo;
         shader->bo_offset = offset;
         list_addtail(&shader->slab_list, &slab->shaders);
         mtx_unlock(&device->shader_slab_mutex);
         return slab->ptr + offset;
      }
   }

   mtx_unlock(&device->shader_slab_mutex);
   struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

   slab->size = MAX2(256 * 1024, shader->code_size);
   slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
                                        RADEON_DOMAIN_VRAM,
                                        RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                        (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
                                         0 : RADEON_FLAG_READ_ONLY),
                                        RADV_BO_PRIORITY_SHADER);
   slab->ptr = (char*)device->ws->buffer_map(slab->bo);
   list_inithead(&slab->shaders);

   mtx_lock(&device->shader_slab_mutex);
   list_add(&slab->slabs, &device->shader_slabs);

   shader->bo = slab->bo;
   shader->bo_offset = 0;
   list_add(&shader->slab_list, &slab->shaders);
   mtx_unlock(&device->shader_slab_mutex);
   return slab->ptr;
}
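
/* A concrete walk through the first-fit search above: with a single
 * 256 KiB slab holding one 5000-byte shader at bo_offset 0, the inner
 * loop advances offset = align_u64(0 + 5000, 256) = 5120 past it, the
 * trailing check sees that 5120 <= slab->size with enough room left,
 * and a new shader is placed at offset 5120; offsets always stay
 * 256-byte aligned.
 */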

void
radv_destroy_shader_slabs(struct radv_device *device)
{
   list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
      device->ws->buffer_destroy(slab->bo);
      free(slab);
   }
   mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static unsigned
radv_get_shader_binary_size(size_t code_size)
{
   return code_size + DEBUGGER_NUM_MARKERS * 4;
}
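
/* I.e. the uploaded binary reserves DEBUGGER_NUM_MARKERS trailing dwords:
 * radv_get_shader_binary_size(code_size) = code_size + 5 * 4 = code_size
 * + 20 bytes, which are filled with 0xbf9f0000 words at upload time (see
 * radv_shader_variant_create() below).
 */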

static void radv_postprocess_config(const struct radv_physical_device *pdevice,
                                    const struct ac_shader_config *config_in,
                                    const struct radv_shader_info *info,
                                    gl_shader_stage stage,
                                    struct ac_shader_config *config_out)
{
   bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
   unsigned vgpr_comp_cnt = 0;
   unsigned num_input_vgprs = info->num_input_vgprs;

   if (stage == MESA_SHADER_FRAGMENT) {
      num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
   }

   unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
   /* +3 for scratch wave offset and VCC */
   unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
   unsigned num_shared_vgprs = config_in->num_shared_vgprs;
   /* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
   assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
          || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
   unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;

   *config_out = *config_in;
   config_out->num_vgprs = num_vgprs;
   config_out->num_sgprs = num_sgprs;
   config_out->num_shared_vgprs = num_shared_vgprs;

   config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
                       S_00B12C_SCRATCH_EN(scratch_enabled);

   if (!pdevice->use_ngg_streamout) {
      config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
                           S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
                           S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
                           S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
                           S_00B12C_SO_EN(!!info->so.num_outputs);
   }

   config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
                                      (info->wave_size == 32 ? 8 : 4)) |
                       S_00B848_DX10_CLAMP(1) |
                       S_00B848_FLOAT_MODE(config_out->float_mode);

   if (pdevice->rad_info.chip_class >= GFX10) {
      config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
   } else {
      config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
      config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
   }
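
   /* Worked example for the field encodings above, with illustrative
    * numbers: a wave64 shader with num_vgprs = 37 and num_sgprs = 30 on
    * GFX9 encodes S_00B848_VGPRS((37 - 1) / 4) = 9, i.e. 40 VGPRs in
    * granules of 4 (granules of 8 for wave32), and
    * S_00B228_SGPRS((30 - 1) / 8) = 3, i.e. 32 SGPRs in granules of 8.
    */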

   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      if (info->is_ngg) {
         config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
         config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
      } else if (info->tes.as_es) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      } else {
         bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
         vgpr_comp_cnt = enable_prim_id ? 3 : 2;

         config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      }
      config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_TESS_CTRL:
      if (pdevice->rad_info.chip_class >= GFX9) {
         /* We need at least 2 components for LS.
          * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         if (pdevice->rad_info.chip_class >= GFX10) {
            vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
         } else {
            vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
         }
      } else {
         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      }
      config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_VERTEX:
      if (info->is_ngg) {
         config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      } else if (info->vs.as_ls) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         /* We need at least 2 components for LS.
          * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
      } else if (info->vs.as_es) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
         vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
      } else {
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID).
          * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
            vgpr_comp_cnt = 3;
         } else if (info->vs.export_prim_id) {
            vgpr_comp_cnt = 2;
         } else if (info->vs.needs_instance_id) {
            vgpr_comp_cnt = 1;
         } else {
            vgpr_comp_cnt = 0;
         }

         config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      }
      config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_FRAGMENT:
      config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_GEOMETRY:
      config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_COMPUTE:
      config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |=
         S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
         S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
         S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
         S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
                                 info->cs.uses_thread_id[1] ? 1 : 0) |
         S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
         S_00B84C_LDS_SIZE(config_in->lds_size);
      config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);

      break;
   default:
      unreachable("unsupported shader type");
      break;
   }

   if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
       (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
      unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
      gl_shader_stage es_stage = stage;
      if (stage == MESA_SHADER_GEOMETRY)
         es_stage = info->gs.es_type;

      /* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
      if (es_stage == MESA_SHADER_VERTEX) {
         es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
      } else if (es_stage == MESA_SHADER_TESS_EVAL) {
         bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
         es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
      } else
         unreachable("Unexpected ES shader stage");

      bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
                           info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
      if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
         gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
      } else if (info->uses_prim_id) {
         gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
      } else if (info->gs.vertices_in >= 3 || tes_triangles) {
         gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
      } else {
         gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
      }

      config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
                           S_00B228_WGP_MODE(1);
      config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                           S_00B22C_LDS_SIZE(config_in->lds_size) |
                           S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
   } else if (pdevice->rad_info.chip_class >= GFX9 &&
              stage == MESA_SHADER_GEOMETRY) {
      unsigned es_type = info->gs.es_type;
      unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

      if (es_type == MESA_SHADER_VERTEX) {
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
         if (info->vs.needs_instance_id) {
            es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
         } else {
            es_vgpr_comp_cnt = 0;
         }
      } else if (es_type == MESA_SHADER_TESS_EVAL) {
         es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
      } else {
         unreachable("invalid shader ES type");
      }

      /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
       * VGPR[0:4] are always loaded.
       */
      if (info->uses_invocation_id) {
         gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
      } else if (info->uses_prim_id) {
         gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
      } else if (info->gs.vertices_in >= 3) {
         gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
      } else {
         gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
      }

      config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
      config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                           S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
   } else if (pdevice->rad_info.chip_class >= GFX9 &&
              stage == MESA_SHADER_TESS_CTRL) {
      config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
   } else {
      config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
   }
}

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
                           const struct radv_shader_binary *binary,
                           bool keep_shader_info)
{
   struct ac_shader_config config = {0};
   struct ac_rtld_binary rtld_binary = {0};
   struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
   if (!variant)
      return NULL;

   variant->ref_count = 1;

   if (binary->type == RADV_BINARY_TYPE_RTLD) {
      struct ac_rtld_symbol lds_symbols[2];
      unsigned num_lds_symbols = 0;
      const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
      size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

      if (device->physical_device->rad_info.chip_class >= GFX9 &&
          (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
          !binary->is_gs_copy_shader) {
         /* We add this symbol even on LLVM <= 8 to ensure that
          * shader->config.lds_size is set correctly below.
          */
         struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
         sym->name = "esgs_ring";
         sym->size = binary->info.ngg_info.esgs_ring_size;
         sym->align = 64 * 1024;
      }

      if (binary->info.is_ngg &&
          binary->stage == MESA_SHADER_GEOMETRY) {
         struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
         sym->name = "ngg_emit";
         sym->size = binary->info.ngg_info.ngg_emit_size * 4;
         sym->align = 4;
      }

      struct ac_rtld_open_info open_info = {
         .info = &device->physical_device->rad_info,
         .shader_type = binary->stage,
         .wave_size = binary->info.wave_size,
         .num_parts = 1,
         .elf_ptrs = &elf_data,
         .elf_sizes = &elf_size,
         .num_shared_lds_symbols = num_lds_symbols,
         .shared_lds_symbols = lds_symbols,
      };

      if (!ac_rtld_open(&rtld_binary, open_info)) {
         free(variant);
         return NULL;
      }

      if (!ac_rtld_read_config(&rtld_binary, &config)) {
         ac_rtld_close(&rtld_binary);
         free(variant);
         return NULL;
      }

      if (rtld_binary.lds_size > 0) {
         unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
         config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
      }
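
      /* Example with illustrative numbers: a linked lds_size of 9000
       * bytes on GFX7+ (512-byte granularity) becomes config.lds_size =
       * align(9000, 512) / 512 = 18 granules, i.e. 9216 bytes.
       */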

      variant->code_size = rtld_binary.rx_size;
      variant->exec_size = rtld_binary.exec_size;
   } else {
      assert(binary->type == RADV_BINARY_TYPE_LEGACY);
      config = ((struct radv_shader_binary_legacy *)binary)->config;
      variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
      variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
   }

   variant->info = binary->info;
   radv_postprocess_config(device->physical_device, &config, &binary->info,
                           binary->stage, &variant->config);

   if (radv_device_use_secure_compile(device->instance)) {
      if (binary->type == RADV_BINARY_TYPE_RTLD)
         ac_rtld_close(&rtld_binary);

      return variant;
   }

   void *dest_ptr = radv_alloc_shader_memory(device, variant);

   if (binary->type == RADV_BINARY_TYPE_RTLD) {
      struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
      struct ac_rtld_upload_info info = {
         .binary = &rtld_binary,
         .rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
         .rx_ptr = dest_ptr,
      };

      if (!ac_rtld_upload(&info)) {
         radv_shader_variant_destroy(device, variant);
         ac_rtld_close(&rtld_binary);
         return NULL;
      }

      if (keep_shader_info ||
          (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
         const char *disasm_data;
         size_t disasm_size;
         if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
            radv_shader_variant_destroy(device, variant);
            ac_rtld_close(&rtld_binary);
            return NULL;
         }

         variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
         variant->disasm_string = malloc(disasm_size + 1);
         memcpy(variant->disasm_string, disasm_data, disasm_size);
         variant->disasm_string[disasm_size] = 0;
      }

      ac_rtld_close(&rtld_binary);
   } else {
      struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
      memcpy(dest_ptr, bin->data + bin->stats_size, bin->code_size);

      /* Add end-of-code markers for the UMR disassembler. */
      uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
      for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
         ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

      variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size)) : NULL;
      variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size + bin->ir_size)) : NULL;

      if (bin->stats_size) {
         variant->statistics = calloc(bin->stats_size, 1);
         memcpy(variant->statistics, bin->data, bin->stats_size);
      }
   }
   return variant;
}

static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
                      int shader_count)
{
   char *data = NULL;
   char *ret = NULL;
   size_t size = 0;
   FILE *f = open_memstream(&data, &size);
   if (f) {
      for (int i = 0; i < shader_count; ++i)
         nir_print_shader(shaders[i], f);
      fclose(f);
   }

   ret = malloc(size + 1);
   if (ret) {
      memcpy(ret, data, size);
      ret[size] = 0;
   }
   free(data);
   return ret;
}

static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
                       struct radv_shader_module *module,
                       struct nir_shader * const *shaders,
                       int shader_count,
                       gl_shader_stage stage,
                       struct radv_shader_info *info,
                       struct radv_nir_compiler_options *options,
                       bool gs_copy_shader,
                       bool keep_shader_info,
                       bool keep_statistic_info,
                       struct radv_shader_binary **binary_out)
{
   enum radeon_family chip_family = device->physical_device->rad_info.family;
   struct radv_shader_binary *binary = NULL;

   options->family = chip_family;
   options->chip_class = device->physical_device->rad_info.chip_class;
   options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
   options->dump_preoptir = options->dump_shader &&
                            device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
   options->record_ir = keep_shader_info;
   options->record_stats = keep_statistic_info;
   options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
   options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
   options->address32_hi = device->physical_device->rad_info.address32_hi;
   options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
   options->use_ngg_streamout = device->physical_device->use_ngg_streamout;

   struct radv_shader_args args = {};
   args.options = options;
   args.shader_info = info;
   args.is_gs_copy_shader = gs_copy_shader;
   radv_declare_shader_args(&args,
                            gs_copy_shader ? MESA_SHADER_VERTEX
                                           : shaders[shader_count - 1]->info.stage,
                            shader_count >= 2,
                            shader_count >= 2 ? shaders[shader_count - 2]->info.stage
                                              : MESA_SHADER_VERTEX);

   if (!device->physical_device->use_aco ||
       options->dump_shader || options->record_ir)
      ac_init_llvm_once();

   if (device->physical_device->use_aco) {
      aco_compile_shader(shader_count, shaders, &binary, &args);
   } else {
      llvm_compile_shader(device, shader_count, shaders, &binary, &args);
   }

   binary->info = *info;

   struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
                                                                    keep_shader_info);
   if (!variant) {
      free(binary);
      return NULL;
   }

   if (options->dump_shader) {
      fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
      for (int i = 1; i < shader_count; ++i)
         fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));

      fprintf(stderr, "\ndisasm:\n%s\n", variant->disasm_string);
   }

   if (keep_shader_info) {
      variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
      if (!gs_copy_shader && !module->nir) {
         variant->spirv = malloc(module->size);
         if (!variant->spirv) {
            free(variant);
            free(binary);
            return NULL;
         }

         memcpy(variant->spirv, module->data, module->size);
         variant->spirv_size = module->size;
      }
   }

   if (binary_out)
      *binary_out = binary;
   else
      free(binary);

   return variant;
}

struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
                            struct radv_shader_module *module,
                            struct nir_shader *const *shaders,
                            int shader_count,
                            struct radv_pipeline_layout *layout,
                            const struct radv_shader_variant_key *key,
                            struct radv_shader_info *info,
                            bool keep_shader_info, bool keep_statistic_info,
                            struct radv_shader_binary **binary_out)
{
   struct radv_nir_compiler_options options = {0};

   options.layout = layout;
   if (key)
      options.key = *key;

   options.explicit_scratch_args = device->physical_device->use_aco;
   options.robust_buffer_access = device->robust_buffer_access;

   return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
                                 &options, false, keep_shader_info, keep_statistic_info, binary_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
                           struct nir_shader *shader,
                           struct radv_shader_info *info,
                           struct radv_shader_binary **binary_out,
                           bool keep_shader_info, bool keep_statistic_info,
                           bool multiview)
{
   struct radv_nir_compiler_options options = {0};

   options.explicit_scratch_args = device->physical_device->use_aco;
   options.key.has_multiview_view_index = multiview;

   return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
                                 info, &options, true, keep_shader_info, keep_statistic_info, binary_out);
}

void
radv_shader_variant_destroy(struct radv_device *device,
                            struct radv_shader_variant *variant)
{
   if (!p_atomic_dec_zero(&variant->ref_count))
      return;

   mtx_lock(&device->shader_slab_mutex);
   list_del(&variant->slab_list);
   mtx_unlock(&device->shader_slab_mutex);

   free(variant->spirv);
   free(variant->nir_string);
   free(variant->disasm_string);
   free(variant->ir_string);
   free(variant->statistics);
   free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_info *info,
                     gl_shader_stage stage)
{
   switch (stage) {
   case MESA_SHADER_VERTEX:
      if (info->vs.as_ls)
         return "Vertex Shader as LS";
      else if (info->vs.as_es)
         return "Vertex Shader as ES";
      else if (info->is_ngg)
         return "Vertex Shader as ESGS";
      else
         return "Vertex Shader as VS";
   case MESA_SHADER_TESS_CTRL:
      return "Tessellation Control Shader";
   case MESA_SHADER_TESS_EVAL:
      if (info->tes.as_es)
         return "Tessellation Evaluation Shader as ES";
      else if (info->is_ngg)
         return "Tessellation Evaluation Shader as ESGS";
      else
         return "Tessellation Evaluation Shader as VS";
   case MESA_SHADER_GEOMETRY:
      return "Geometry Shader";
   case MESA_SHADER_FRAGMENT:
      return "Pixel Shader";
   case MESA_SHADER_COMPUTE:
      return "Compute Shader";
   default:
      return "Unknown shader";
   }
}

unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
                            gl_shader_stage stage,
                            const unsigned *sizes)
{
   switch (stage) {
   case MESA_SHADER_TESS_CTRL:
      return chip_class >= GFX7 ? 128 : 64;
   case MESA_SHADER_GEOMETRY:
      return chip_class >= GFX9 ? 128 : 64;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      return 0;
   }

   unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
   return max_workgroup_size;
}

unsigned
radv_get_max_waves(struct radv_device *device,
                   struct radv_shader_variant *variant,
                   gl_shader_stage stage)
{
   enum chip_class chip_class = device->physical_device->rad_info.chip_class;
   unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
   uint8_t wave_size = variant->info.wave_size;
   struct ac_shader_config *conf = &variant->config;
   unsigned max_simd_waves;
   unsigned lds_per_wave = 0;

   max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

   if (stage == MESA_SHADER_FRAGMENT) {
      lds_per_wave = conf->lds_size * lds_increment +
                     align(variant->info.ps.num_interp * 48,
                           lds_increment);
   } else if (stage == MESA_SHADER_COMPUTE) {
      unsigned max_workgroup_size =
         radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
      lds_per_wave = (conf->lds_size * lds_increment) /
                     DIV_ROUND_UP(max_workgroup_size, wave_size);
   }

   if (conf->num_sgprs) {
      unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
      max_simd_waves =
         MIN2(max_simd_waves,
              device->physical_device->rad_info.num_physical_sgprs_per_simd /
              sgprs);
   }

   if (conf->num_vgprs) {
      unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
      max_simd_waves =
         MIN2(max_simd_waves,
              device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
   }

   unsigned max_lds_per_simd = device->physical_device->rad_info.lds_size_per_workgroup /
                               device->physical_device->rad_info.num_simd_per_compute_unit;
   if (lds_per_wave)
      max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);

   return max_simd_waves;
}
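
/* Worked example for the clamping above, with illustrative numbers (not
 * tied to a specific chip): a wave64 shader with num_vgprs = 96 and
 * num_sgprs = 40 on GFX8 rounds VGPRs to align(96, 4) = 96, so 256
 * physical wave64 VGPRs per SIMD allow 256 / 96 = 2 waves; SGPRs round
 * to align(40, 16) = 48, so 800 physical SGPRs allow 800 / 48 = 16
 * waves. The result is the minimum of those and the hardware wave
 * limit, here 2 waves per SIMD.
 */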

static void
generate_shader_stats(struct radv_device *device,
                      struct radv_shader_variant *variant,
                      gl_shader_stage stage,
                      struct _mesa_string_buffer *buf)
{
   struct ac_shader_config *conf = &variant->config;
   unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

   if (stage == MESA_SHADER_FRAGMENT) {
      _mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
                                 "SPI_PS_INPUT_ADDR = 0x%04x\n"
                                 "SPI_PS_INPUT_ENA = 0x%04x\n",
                                 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
   }

   _mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
                              "SGPRS: %d\n"
                              "VGPRS: %d\n"
                              "Spilled SGPRs: %d\n"
                              "Spilled VGPRs: %d\n"
                              "PrivMem VGPRS: %d\n"
                              "Code Size: %d bytes\n"
                              "LDS: %d blocks\n"
                              "Scratch: %d bytes per wave\n"
                              "Max Waves: %d\n",
                              conf->num_sgprs, conf->num_vgprs,
                              conf->spilled_sgprs, conf->spilled_vgprs,
                              variant->info.private_mem_vgprs, variant->exec_size,
                              conf->lds_size, conf->scratch_bytes_per_wave,
                              max_simd_waves);

   if (variant->statistics) {
      _mesa_string_buffer_printf(buf, "*** COMPILER STATS ***\n");
      for (unsigned i = 0; i < variant->statistics->count; i++) {
         struct radv_compiler_statistic_info *info = &variant->statistics->infos[i];
         uint32_t value = variant->statistics->values[i];
         _mesa_string_buffer_printf(buf, "%s: %u\n", info->name, value);
      }
   }

   _mesa_string_buffer_printf(buf, "********************\n\n\n");
}

void
radv_shader_dump_stats(struct radv_device *device,
                       struct radv_shader_variant *variant,
                       gl_shader_stage stage,
                       FILE *file)
{
   struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

   generate_shader_stats(device, variant, stage, buf);

   fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
   fprintf(file, "%s", buf->buf);

   _mesa_string_buffer_destroy(buf);
}

VkResult
radv_GetShaderInfoAMD(VkDevice _device,
                      VkPipeline _pipeline,
                      VkShaderStageFlagBits shaderStage,
                      VkShaderInfoTypeAMD infoType,
                      size_t* pInfoSize,
                      void* pInfo)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
   gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
   struct radv_shader_variant *variant = pipeline->shaders[stage];
   struct _mesa_string_buffer *buf;
   VkResult result = VK_SUCCESS;

   /* Spec doesn't indicate what to do if the stage is invalid, so just
    * return no info for this. */
   if (!variant)
      return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

   switch (infoType) {
   case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
      if (!pInfo) {
         *pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
      } else {
         unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
         struct ac_shader_config *conf = &variant->config;

         VkShaderStatisticsInfoAMD statistics = {};
         statistics.shaderStageMask = shaderStage;
         statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
         statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
         statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

         if (stage == MESA_SHADER_COMPUTE) {
            unsigned *local_size = variant->info.cs.block_size;
            unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

            statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
                                           ceil((double)workgroup_size / statistics.numPhysicalVgprs);

            statistics.computeWorkGroupSize[0] = local_size[0];
            statistics.computeWorkGroupSize[1] = local_size[1];
            statistics.computeWorkGroupSize[2] = local_size[2];
         } else {
            statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
         }

         statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
         statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
         statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
         statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
         statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

         size_t size = *pInfoSize;
         *pInfoSize = sizeof(statistics);

         memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

         if (size < *pInfoSize)
            result = VK_INCOMPLETE;
      }

      break;
   case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
      buf = _mesa_string_buffer_create(NULL, 1024);

      _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
      _mesa_string_buffer_printf(buf, "%s\n\n", variant->ir_string);
      _mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
      generate_shader_stats(device, variant, stage, buf);

      /* Need to include the null terminator. */
      size_t length = buf->length + 1;

      if (!pInfo) {
         *pInfoSize = length;
      } else {
         size_t size = *pInfoSize;
         *pInfoSize = length;

         memcpy(pInfo, buf->buf, MIN2(size, length));

         if (size < length)
            result = VK_INCOMPLETE;
      }

      _mesa_string_buffer_destroy(buf);
      break;
   default:
      /* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
      result = VK_ERROR_FEATURE_NOT_PRESENT;
      break;
   }

   return result;
}
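
/* For reference, the usual two-call pattern an application uses against
 * the query above (an illustrative sketch; error handling omitted):
 *
 *    size_t size = 0;
 *    vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, NULL);
 *    char *text = malloc(size);
 *    vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, text);
 */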