radv: implement VK_KHR_shader_clock
[mesa.git] src/amd/vulkan/radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"

static const struct nir_shader_compiler_options nir_options_llvm = {
	.vertex_id_zero_based = true,
	.lower_scmp = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_fsat = true,
	.lower_fdiv = true,
	.lower_fmod = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_sub = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_fpow = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};

static const struct nir_shader_compiler_options nir_options_aco = {
	.vertex_id_zero_based = true,
	.lower_scmp = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_fdiv = true,
	.lower_fmod = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_unpack_half_2x16 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_fpow = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};
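
/* Observation (not from upstream): relative to nir_options_llvm above, the
 * ACO table drops lower_fsat and lower_sub and adds lower_unpack_half_2x16;
 * everything else is identical. The rationale given here is an assumption:
 * ACO presumably handles fsat and integer subtraction natively but wants
 * half-float unpacking lowered in NIR.
 */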

bool
radv_can_dump_shader(struct radv_device *device,
		     struct radv_shader_module *module,
		     bool is_gs_copy_shader)
{
	if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
		return false;
	if (module)
		return !module->nir ||
		       (device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);

	return is_gs_copy_shader;
}
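
/* Illustrative usage note, as an assumption (the exact option names live in
 * radv's debug-option table): these flags are driven by the RADV_DEBUG
 * environment variable, e.g.
 *
 *   RADV_DEBUG=shaders,metashaders ./my_vulkan_app
 *
 * to dump internal (meta) shaders in addition to application shaders.
 */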

bool
radv_can_dump_shader_stats(struct radv_device *device,
			   struct radv_shader_module *module)
{
	/* Only dump non-meta shader stats. */
	return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
	       module && !module->nir;
}

unsigned shader_io_get_unique_index(gl_varying_slot slot)
{
	/* handle patch indices separately */
	if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
		return 0;
	if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
		return 1;
	if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
		return 2 + (slot - VARYING_SLOT_PATCH0);
	if (slot == VARYING_SLOT_POS)
		return 0;
	if (slot == VARYING_SLOT_PSIZ)
		return 1;
	if (slot == VARYING_SLOT_CLIP_DIST0)
		return 2;
	if (slot == VARYING_SLOT_CLIP_DIST1)
		return 3;
	/* 3 is reserved for clip dist as well */
	if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
		return 4 + (slot - VARYING_SLOT_VAR0);
	unreachable("illegal slot in get unique index\n");
}
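
/* A few concrete mappings implied by the rules above (illustrative):
 *
 *   VARYING_SLOT_TESS_LEVEL_OUTER -> 0          (patch index space)
 *   VARYING_SLOT_POS              -> 0          (per-vertex space)
 *   VARYING_SLOT_CLIP_DIST1       -> 3
 *   VARYING_SLOT_VAR5             -> 4 + 5 = 9
 *
 * Patch and per-vertex varyings deliberately share the same small indices,
 * so callers must know which space they are indexing into.
 */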

VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}
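
/* Minimal client-side sketch of the entry point above (the SPIR-V buffer and
 * error handling are placeholders, not upstream code):
 *
 *   VkShaderModuleCreateInfo info = {
 *           .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
 *           .codeSize = spirv_size_in_bytes,    // must be a multiple of 4
 *           .pCode = spirv_words,
 *   };
 *   VkShaderModule module;
 *   VkResult r = vkCreateShaderModule(device, &info, NULL, &module);
 *
 * The SHA-1 computed above presumably serves as a cache key for pipelines
 * built from this module.
 */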

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir.  Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
		NIR_PASS(progress, shader, nir_remove_dead_variables,
			 nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_conditional_discard);
	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
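
/* The loop above is the usual NIR fixed-point idiom: NIR_PASS ORs each
 * pass's "made progress" result into `progress`, while NIR_PASS_V runs a
 * pass without tracking progress.  A minimal sketch of the pattern (the
 * pass selection here is illustrative only):
 *
 *   bool progress;
 *   do {
 *           progress = false;
 *           NIR_PASS(progress, shader, nir_copy_prop);
 *           NIR_PASS(progress, shader, nir_opt_dce);
 *   } while (progress);
 */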

nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout,
			   bool use_aco)
{
	nir_shader *nir;
	const nir_shader_compiler_options *nir_options = use_aco ? &nir_options_aco :
								   &nir_options_llvm;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly.  In that case, we ignore the SPIR-V entirely
		 * and use the NIR shader as-is. */
		nir = module->nir;
		nir->options = nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(spirv, module->size, stderr);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				if (entry.size == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_gcn_shader = true,
				.amd_shader_ballot = device->physical_device->use_shader_ballot,
				.amd_trinary_minmax = true,
				.demote_to_helper_invocation = device->physical_device->use_aco,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float16 = !device->physical_device->use_aco,
				.float64 = true,
				.geometry_streams = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int8 = !device->physical_device->use_aco,
				.int16 = !device->physical_device->use_aco,
				.int64 = true,
				.int64_atomics = true,
				.multiview = true,
				.physical_storage_buffer_address = true,
				.post_depth_coverage = true,
				.runtime_descriptor_array = true,
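				/* Accepts SpvCapabilityShaderClockKHR; this
				 * is the cap behind VK_KHR_shader_clock that
				 * the commit subject refers to. */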
				.shader_clock = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = !device->physical_device->use_aco,
				.storage_16bit = !device->physical_device->use_aco,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
			.frag_coord_is_sysval = true,
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions.  That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);

		/* Make sure we lower constant initializers on output variables so that
		 * nir_remove_dead_variables below sees the corresponding stores
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

		/* Split member structs.  We do this before lower_io_to_temporaries so that
		 * it doesn't lower system values to temporaries by accident.
		 */
		NIR_PASS_V(nir, nir_split_var_copies);
		NIR_PASS_V(nir, nir_split_per_member_structs);

		if (nir->info.stage == MESA_SHADER_FRAGMENT && use_aco)
			NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			NIR_PASS_V(nir, nir_lower_input_attachments, true);

		NIR_PASS_V(nir, nir_remove_dead_variables,
		           nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

		NIR_PASS_V(nir, nir_propagate_invariant);

		NIR_PASS_V(nir, nir_lower_system_values);
		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
		.lower_tg4_offsets = true,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});

	nir_lower_load_const_to_scalar(nir);

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Lower large variables that are always constant with load_constant
	 * intrinsics, which get turned into PC-relative loads from a data
	 * section next to the shader.
	 */
	NIR_PASS_V(nir, nir_opt_large_constants,
		   glsl_get_natural_size_align_bytes, 16);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once.  Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}
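
/* A hedged sketch of how a pipeline build drives the function above (the
 * variables are placeholders; the real call sites live elsewhere in radv):
 *
 *   nir_shader *nir =
 *           radv_shader_compile_to_nir(device, module, "main",
 *                                      MESA_SHADER_FRAGMENT, spec_info,
 *                                      0, pipeline_layout, use_aco);
 *
 * The result is lowered, optimized NIR ready for whichever backend (ACO or
 * LLVM) matches the nir_options table chosen at the top of the function.
 */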

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}

static nir_variable *
find_layer_in_var(nir_shader *nir)
{
	nir_foreach_variable(var, &nir->inputs) {
		if (var->data.location == VARYING_SLOT_LAYER) {
			return var;
		}
	}

	nir_variable *var =
		nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
	var->data.location = VARYING_SLOT_LAYER;
	var->data.interpolation = INTERP_MODE_FLAT;
	return var;
}

/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The attachment lowering also needs to know the
 * layer so that it can sample from the correct layer. The code generates a
 * load from the layer_id sysval, but since we don't have a way to get at this
 * information from the fragment shader, we also need to lower this to the
 * gl_Layer varying.  This pass lowers both to a varying load from the LAYER
 * slot, before lowering io, so that nir_assign_var_locations() will give the
 * LAYER varying the correct driver_location.
 */

static bool
lower_view_index(nir_shader *nir)
{
	bool progress = false;
	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
	nir_builder b;
	nir_builder_init(&b, entry);

	nir_variable *layer = NULL;
	nir_foreach_block(block, entry) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
			if (load->intrinsic != nir_intrinsic_load_view_index &&
			    load->intrinsic != nir_intrinsic_load_layer_id)
				continue;

			if (!layer)
				layer = find_layer_in_var(nir);

			b.cursor = nir_before_instr(instr);
			nir_ssa_def *def = nir_load_var(&b, layer);
			nir_ssa_def_rewrite_uses(&load->dest.ssa,
						 nir_src_for_ssa(def));

			nir_instr_remove(instr);
			progress = true;
		}
	}

	return progress;
}
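
/* Conceptually, for every matching intrinsic the pass above rewrites
 *
 *   ssa_N = intrinsic load_view_index ()        (or load_layer_id)
 *
 * into a load of the flat VARYING_SLOT_LAYER input created by
 * find_layer_in_var(), so the fragment shader reads the layer the geometry
 * pipeline wrote.  (Sketch in NIR's textual form, illustrative only.)
 */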

void
radv_lower_fs_io(nir_shader *nir)
{
	NIR_PASS_V(nir, lower_view_index);
	nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
				    MESA_SHADER_FRAGMENT);

	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants */
	nir_opt_constant_folding(nir);

	NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}


void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
					             0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}
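
/* Worked example of the first-fit walk above (numbers are illustrative),
 * with one 256 KiB slab already holding two shaders:
 *
 *   shader A: bo_offset = 0,    code_size = 1000   -> next hole at 1024
 *   shader B: bo_offset = 1024, code_size = 3000   -> next hole at 4096
 *
 * A new shader is placed in the first gap where s->bo_offset - offset is
 * large enough; otherwise it lands at offset 4096, since offsets advance
 * by align_u64(bo_offset + code_size, 256) after each occupant.
 */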

void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static unsigned
radv_get_shader_binary_size(size_t code_size)
{
	return code_size + DEBUGGER_NUM_MARKERS * 4;
}
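
/* e.g. a 1000-byte shader reserves 1000 + 5 * 4 = 1020 bytes, leaving room
 * for the five end-of-code markers written at upload time below. */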

static void radv_postprocess_config(const struct radv_physical_device *pdevice,
				    const struct ac_shader_config *config_in,
				    const struct radv_shader_info *info,
				    gl_shader_stage stage,
				    struct ac_shader_config *config_out)
{
	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;
	unsigned num_input_vgprs = info->num_input_vgprs;

	if (stage == MESA_SHADER_FRAGMENT) {
		num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
	}

	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
	/* +3 for scratch wave offset and VCC */
	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
	unsigned num_shared_vgprs = config_in->num_shared_vgprs;
	/* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
	assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
	       || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
	unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;

	*config_out = *config_in;
	config_out->num_vgprs = num_vgprs;
	config_out->num_sgprs = num_sgprs;
	config_out->num_shared_vgprs = num_shared_vgprs;

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	config_out->float_mode |= V_00B028_FP_64_DENORMS;

	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
			    S_00B12C_SCRATCH_EN(scratch_enabled);

	if (!pdevice->use_ngg_streamout) {
		config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
				     S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
				     S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
				     S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
				     S_00B12C_SO_EN(!!info->so.num_outputs);
	}

	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
					   (info->wave_size == 32 ? 8 : 4)) |
			    S_00B848_DX10_CLAMP(1) |
			    S_00B848_FLOAT_MODE(config_out->float_mode);

	if (pdevice->rad_info.chip_class >= GFX10) {
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
	} else {
		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
	}

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
		} else if (info->tes.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		} else {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			vgpr_comp_cnt = enable_prim_id ? 3 : 2;

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (pdevice->rad_info.chip_class >= GFX9) {
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
			 */
			if (pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
			} else {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
			}
		} else {
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_VERTEX:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		} else if (info->vs.as_ls) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
			 */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
		} else if (info->vs.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
		} else {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
			 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
			 * StepRate0 is set to 1, so that VGPR3 doesn't have to be loaded.
			 */
			if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = 3;
			} else if (info->vs.export_prim_id) {
				vgpr_comp_cnt = 2;
			} else if (info->vs.needs_instance_id) {
				vgpr_comp_cnt = 1;
			} else {
				vgpr_comp_cnt = 0;
			}

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_GEOMETRY:
		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_COMPUTE:
		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |=
			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
						info->cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(config_in->lds_size);
		config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);

		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
		gl_shader_stage es_stage = stage;
		if (stage == MESA_SHADER_GEOMETRY)
			es_stage = info->gs.es_type;

		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
		if (es_stage == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
		} else if (es_stage == MESA_SHADER_TESS_EVAL) {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		} else
			unreachable("Unexpected ES shader stage");

		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
		if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
				     S_00B228_WGP_MODE(1);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_LDS_SIZE(config_in->lds_size) |
				     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = info->gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			if (info->vs.needs_instance_id) {
				es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
			} else {
				es_vgpr_comp_cnt = 0;
			}
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
}
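
/* Worked example of the RSRC1 encoding above (figures are illustrative):
 * a wave64 shader using 41 VGPRs and 30 SGPRs on GFX9 gets
 *
 *   S_00B848_VGPRS((41 - 1) / 4) -> field = 10  (VGPRs granted in 4s)
 *   S_00B228_SGPRS((30 - 1) / 8) -> field = 3   (SGPRs granted in 8s)
 *
 * i.e. the fields hold "allocation blocks minus one", which the hardware
 * scales back up to 44 VGPRs and 32 SGPRs.
 */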

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   const struct radv_shader_binary *binary,
			   bool keep_shader_info)
{
	struct ac_shader_config config = {0};
	struct ac_rtld_binary rtld_binary = {0};
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	variant->ref_count = 1;

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct ac_rtld_symbol lds_symbols[2];
		unsigned num_lds_symbols = 0;
		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
		    !binary->is_gs_copy_shader) {
			/* We add this symbol even on LLVM <= 8 to ensure that
			 * shader->config.lds_size is set correctly below.
			 */
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "esgs_ring";
			sym->size = binary->info.ngg_info.esgs_ring_size;
			sym->align = 64 * 1024;
		}

		if (binary->info.is_ngg &&
		    binary->stage == MESA_SHADER_GEOMETRY) {
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "ngg_emit";
			sym->size = binary->info.ngg_info.ngg_emit_size * 4;
			sym->align = 4;
		}

		struct ac_rtld_open_info open_info = {
			.info = &device->physical_device->rad_info,
			.shader_type = binary->stage,
			.wave_size = binary->info.wave_size,
			.num_parts = 1,
			.elf_ptrs = &elf_data,
			.elf_sizes = &elf_size,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols,
		};

		if (!ac_rtld_open(&rtld_binary, open_info)) {
			free(variant);
			return NULL;
		}

		if (!ac_rtld_read_config(&rtld_binary, &config)) {
			ac_rtld_close(&rtld_binary);
			free(variant);
			return NULL;
		}

		if (rtld_binary.lds_size > 0) {
			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
		}

		variant->code_size = rtld_binary.rx_size;
		variant->exec_size = rtld_binary.exec_size;
	} else {
		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
		config = ((struct radv_shader_binary_legacy *)binary)->config;
		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
		variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
	}

	variant->info = binary->info;
	radv_postprocess_config(device->physical_device, &config, &binary->info,
				binary->stage, &variant->config);

	void *dest_ptr = radv_alloc_shader_memory(device, variant);

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
		struct ac_rtld_upload_info info = {
			.binary = &rtld_binary,
			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
			.rx_ptr = dest_ptr,
		};

		if (!ac_rtld_upload(&info)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		if (keep_shader_info ||
		    (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
			const char *disasm_data;
			size_t disasm_size;
			if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
				radv_shader_variant_destroy(device, variant);
				ac_rtld_close(&rtld_binary);
				return NULL;
			}

			variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
			variant->disasm_string = malloc(disasm_size + 1);
			memcpy(variant->disasm_string, disasm_data, disasm_size);
			variant->disasm_string[disasm_size] = 0;
		}

		ac_rtld_close(&rtld_binary);
	} else {
		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
		memcpy(dest_ptr, bin->data, bin->code_size);

		/* Add end-of-code markers for the UMR disassembler. */
		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

		variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->ir_size)) : NULL;
	}
	return variant;
}

static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
                      int shader_count)
{
	char *data = NULL;
	char *ret = NULL;
	size_t size = 0;
	FILE *f = open_memstream(&data, &size);
	if (f) {
		for (int i = 0; i < shader_count; ++i)
			nir_print_shader(shaders[i], f);
		fclose(f);
	}

	ret = malloc(size + 1);
	if (ret) {
		memcpy(ret, data, size);
		ret[size] = 0;
	}
	free(data);
	return ret;
}
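
/* open_memstream() is POSIX.1-2008: it grows `data` on the heap as the
 * shaders are printed, and the copy above hands the caller an ordinary
 * malloc'ed, NUL-terminated string that can be freed independently. */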

static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
		       struct radv_shader_module *module,
		       struct nir_shader * const *shaders,
		       int shader_count,
		       gl_shader_stage stage,
		       struct radv_shader_info *info,
		       struct radv_nir_compiler_options *options,
		       bool gs_copy_shader,
		       bool keep_shader_info,
		       bool use_aco,
		       struct radv_shader_binary **binary_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	struct radv_shader_binary *binary = NULL;

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_ir = keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;
	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;

	if ((stage == MESA_SHADER_GEOMETRY && !options->key.vs_common_out.as_ngg) ||
	    gs_copy_shader)
		options->wave_size = 64;
	else if (stage == MESA_SHADER_COMPUTE)
		options->wave_size = device->physical_device->cs_wave_size;
	else if (stage == MESA_SHADER_FRAGMENT)
		options->wave_size = device->physical_device->ps_wave_size;
	else
		options->wave_size = device->physical_device->ge_wave_size;

	if (!use_aco || options->dump_shader || options->record_ir)
		ac_init_llvm_once();

	if (use_aco) {
		aco_compile_shader(shader_count, shaders, &binary, info, options);
		binary->info = *info;
	} else {
		enum ac_target_machine_options tm_options = 0;
		struct ac_llvm_compiler ac_llvm;
		bool thread_compiler;

		if (options->supports_spill)
			tm_options |= AC_TM_SUPPORTS_SPILL;
		if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
			tm_options |= AC_TM_SISCHED;
		if (options->check_ir)
			tm_options |= AC_TM_CHECK_IR;
		if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
			tm_options |= AC_TM_NO_LOAD_STORE_OPT;

		thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
		radv_init_llvm_compiler(&ac_llvm,
					thread_compiler,
					chip_family, tm_options,
					options->wave_size);

		if (gs_copy_shader) {
			assert(shader_count == 1);
			radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
						    info, options);
		} else {
			radv_compile_nir_shader(&ac_llvm, &binary, info,
						shaders, shader_count, options);
		}

		binary->info = *info;
		radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
	}

	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
									  keep_shader_info);
	if (!variant) {
		free(binary);
		return NULL;
	}
	variant->aco_used = use_aco;

	if (options->dump_shader) {
		fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
	}


	if (keep_shader_info) {
		variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
		if (!gs_copy_shader && !module->nir) {
			variant->spirv = (uint32_t *)module->data;
			variant->spirv_size = module->size;
		}
	}

	if (binary_out)
		*binary_out = binary;
	else
		free(binary);

	return variant;
}

struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
			    struct radv_shader_module *module,
			    struct nir_shader *const *shaders,
			    int shader_count,
			    struct radv_pipeline_layout *layout,
			    const struct radv_shader_variant_key *key,
			    struct radv_shader_info *info,
			    bool keep_shader_info,
			    bool use_aco,
			    struct radv_shader_binary **binary_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;
	options.robust_buffer_access = device->robust_buffer_access;

	return shader_variant_compile(device, module, shaders, shader_count,
				      shaders[shader_count - 1]->info.stage, info,
				      &options, false, keep_shader_info, use_aco, binary_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   struct radv_shader_info *info,
			   struct radv_shader_binary **binary_out,
			   bool keep_shader_info,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				      info, &options, true, keep_shader_info, false, binary_out);
}

void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	free(variant->nir_string);
	free(variant->disasm_string);
	free(variant->ir_string);
	free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_info *info,
		     gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (info->vs.as_ls)
			return "Vertex Shader as LS";
		else if (info->vs.as_es)
			return "Vertex Shader as ES";
		else if (info->is_ngg)
			return "Vertex Shader as ESGS";
		else
			return "Vertex Shader as VS";
	case MESA_SHADER_TESS_CTRL:
		return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL:
		if (info->tes.as_es)
			return "Tessellation Evaluation Shader as ES";
		else if (info->is_ngg)
			return "Tessellation Evaluation Shader as ESGS";
		else
			return "Tessellation Evaluation Shader as VS";
	case MESA_SHADER_GEOMETRY:
		return "Geometry Shader";
	case MESA_SHADER_FRAGMENT:
		return "Pixel Shader";
	case MESA_SHADER_COMPUTE:
		return "Compute Shader";
	default:
		return "Unknown shader";
	}
}

unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
			    gl_shader_stage stage,
			    const unsigned *sizes)
{
	switch (stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
	return max_workgroup_size;
}
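
/* e.g. a compute shader with local_size = (8, 8, 2) reports
 * 8 * 8 * 2 = 128 invocations per workgroup. */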

unsigned
radv_get_max_waves(struct radv_device *device,
		   struct radv_shader_variant *variant,
		   gl_shader_stage stage)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	uint8_t wave_size = variant->info.wave_size;
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.ps.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, wave_size);
	}

	if (conf->num_sgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     device->physical_device->rad_info.num_physical_sgprs_per_simd /
			     conf->num_sgprs);

	if (conf->num_vgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	return max_simd_waves;
}
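
/* Worked example (figures are illustrative, not from a specific chip):
 * a wave64 shader with num_sgprs = 40 and num_vgprs = 64, on a part with
 * 800 physical SGPRs per SIMD and RADV_NUM_PHYSICAL_VGPRS = 256, gives
 *
 *   SGPR limit: 800 / 40 = 20 waves
 *   VGPR limit: 256 / 64 = 4 waves
 *
 * so VGPR pressure caps each SIMD at 4 waves.
 */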

static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->exec_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}

VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->info.cs.block_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}

		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}
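
/* Client-side usage follows the standard Vulkan two-call idiom; a minimal
 * sketch for the disassembly path (in real code the function pointer is
 * obtained via vkGetDeviceProcAddr, since VK_AMD_shader_info is a device
 * extension):
 *
 *   size_t size = 0;
 *   vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                      VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, NULL);
 *   char *text = malloc(size);
 *   vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                      VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, text);
 *
 * VK_INCOMPLETE (not an error) is returned when the buffer supplied on the
 * second call turns out to be too small.
 */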