radv: determine shaders wavesize at pipeline level
[mesa.git] / src / amd / vulkan / radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"

static const struct nir_shader_compiler_options nir_options_llvm = {
	.vertex_id_zero_based = true,
	.lower_scmp = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_fsat = true,
	.lower_fdiv = true,
	.lower_fmod = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_sub = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_fpow = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};

static const struct nir_shader_compiler_options nir_options_aco = {
	.vertex_id_zero_based = true,
	.lower_scmp = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_fdiv = true,
	.lower_fmod = true,
	.lower_bitfield_insert_to_bitfield_select = true,
	.lower_bitfield_extract = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_unpack_half_2x16 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_fpow = true,
	.lower_mul_2x32_64 = true,
	.lower_rotate = true,
	.max_unroll_iterations = 32,
	.use_interpolated_input_intrinsics = true,
};
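
/* The two option tables above differ only where the backends differ: the
 * ACO table additionally lowers unpack_half_2x16, while the LLVM table also
 * asks NIR to lower fsat and sub, which ACO handles natively.
 */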

bool
radv_can_dump_shader(struct radv_device *device,
		     struct radv_shader_module *module,
		     bool is_gs_copy_shader)
{
	if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
		return false;
	if (module)
		return !module->nir ||
		       (device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);

	return is_gs_copy_shader;
}

bool
radv_can_dump_shader_stats(struct radv_device *device,
			   struct radv_shader_module *module)
{
	/* Only dump non-meta shader stats. */
	return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
	       module && !module->nir;
}

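/* Map a varying slot to a compact per-stage index. Patch varyings (the tess
 * levels and PATCH0..N) use their own index space, separate from the
 * per-vertex varyings; e.g. VARYING_SLOT_VAR5 maps to 4 + 5 = 9, while
 * VARYING_SLOT_PATCH2 maps to 2 + 2 = 4 in the patch space.
 */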
unsigned shader_io_get_unique_index(gl_varying_slot slot)
{
	/* handle patch indices separately */
	if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
		return 0;
	if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
		return 1;
	if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
		return 2 + (slot - VARYING_SLOT_PATCH0);
	if (slot == VARYING_SLOT_POS)
		return 0;
	if (slot == VARYING_SLOT_PSIZ)
		return 1;
	if (slot == VARYING_SLOT_CLIP_DIST0)
		return 2;
	if (slot == VARYING_SLOT_CLIP_DIST1)
		return 3;
	/* index 3 is reserved for the second clip/cull distance vector */
	if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
		return 4 + (slot - VARYING_SLOT_VAR0);
	unreachable("illegal slot in get unique index\n");
}

VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

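/* Run the NIR optimization loop until it stops making progress (or just once
 * when optimizing conservatively). allow_copies must only be set on the first
 * invocation for a given shader, since later calls assume copy_deref
 * instructions have already been lowered away.
 */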
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir. Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);
		NIR_PASS(progress, shader, nir_remove_dead_variables,
			 nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_conditional_discard);
	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}

nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout,
			   bool use_aco)
{
	nir_shader *nir;
	const nir_shader_compiler_options *nir_options = use_aco ? &nir_options_aco :
								   &nir_options_llvm;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a NIR
		 * shader directly. In that case, we ignore the SPIR-V entirely
		 * and just use the NIR shader.
		 */
		nir = module->nir;
		nir->options = nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(module->data, module->size, stderr);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
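		/* Unpack VkSpecializationInfo into the flat {id, value} array
		 * that spirv_to_nir() expects: each map entry names a
		 * constantID and an offset/size into pData.
		 */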
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = entry.constantID;
				/* The width of each constant is given by its
				 * map entry size, not by the total data size.
				 */
				if (entry.size == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_gcn_shader = true,
				.amd_shader_ballot = device->physical_device->use_shader_ballot,
				.amd_trinary_minmax = true,
				.demote_to_helper_invocation = device->physical_device->use_aco,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float_controls = true,
				.float16 = !device->physical_device->use_aco,
				.float64 = true,
				.geometry_streams = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int8 = !device->physical_device->use_aco,
				.int16 = !device->physical_device->use_aco,
				.int64 = true,
				.int64_atomics = true,
				.multiview = true,
				.physical_storage_buffer_address = true,
				.post_depth_coverage = true,
				.runtime_descriptor_array = true,
				.shader_clock = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = !device->physical_device->use_aco,
				.storage_16bit = !device->physical_device->use_aco,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
			.frag_coord_is_sysval = true,
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);

		/* Make sure we lower constant initializers on output variables so that
		 * nir_remove_dead_variables below sees the corresponding stores
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

		/* Split member structs. We do this before lower_io_to_temporaries so that
		 * it doesn't lower system values to temporaries by accident.
		 */
		NIR_PASS_V(nir, nir_split_var_copies);
		NIR_PASS_V(nir, nir_split_per_member_structs);

		if (nir->info.stage == MESA_SHADER_FRAGMENT && use_aco)
			NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
		if (nir->info.stage == MESA_SHADER_FRAGMENT)
			NIR_PASS_V(nir, nir_lower_input_attachments, true);

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);

		NIR_PASS_V(nir, nir_propagate_invariant);

		NIR_PASS_V(nir, nir_lower_system_values);
		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
		.lower_tg4_offsets = true,
	};

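	/* These options lower every projective texture op (txp) and turn tg4
	 * ops carrying constant offset arrays into separate gathers.
	 */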
	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY ||
	    nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});
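
	/* Note: the subgroup lowering above assumes 64-wide waves; ballots
	 * produce 64-bit masks and shuffles are scalarized and split into
	 * 32-bit halves.
	 */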

	nir_lower_load_const_to_scalar(nir);

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Lower large variables that are always constant with load_constant
	 * intrinsics, which get turned into PC-relative loads from a data
	 * section next to the shader.
	 */
	NIR_PASS_V(nir, nir_opt_large_constants,
		   glsl_get_natural_size_align_bytes, 16);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
	return glsl_count_attribute_slots(type, false);
}

static nir_variable *
find_layer_in_var(nir_shader *nir)
{
	nir_foreach_variable(var, &nir->inputs) {
		if (var->data.location == VARYING_SLOT_LAYER) {
			return var;
		}
	}

	nir_variable *var =
		nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
	var->data.location = VARYING_SLOT_LAYER;
	var->data.interpolation = INTERP_MODE_FLAT;
	return var;
}

/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The input attachment lowering also needs to know the
 * layer so that it can sample from the correct layer. The code generates a
 * load from the layer_id sysval, but since we don't have a way to get at this
 * information from the fragment shader, we also need to lower this to the
 * gl_Layer varying. This pass lowers both to a varying load from the LAYER
 * slot, before lowering io, so that nir_assign_var_locations() will give the
 * LAYER varying the correct driver_location.
 */

static bool
lower_view_index(nir_shader *nir)
{
	bool progress = false;
	nir_function_impl *entry = nir_shader_get_entrypoint(nir);
	nir_builder b;
	nir_builder_init(&b, entry);

	nir_variable *layer = NULL;
	nir_foreach_block(block, entry) {
		nir_foreach_instr_safe(instr, block) {
			if (instr->type != nir_instr_type_intrinsic)
				continue;

			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
			if (load->intrinsic != nir_intrinsic_load_view_index &&
			    load->intrinsic != nir_intrinsic_load_layer_id)
				continue;

			if (!layer)
				layer = find_layer_in_var(nir);

			b.cursor = nir_before_instr(instr);
			nir_ssa_def *def = nir_load_var(&b, layer);
			nir_ssa_def_rewrite_uses(&load->dest.ssa,
						 nir_src_for_ssa(def));

			nir_instr_remove(instr);
			progress = true;
		}
	}

	return progress;
}

void
radv_lower_fs_io(nir_shader *nir)
{
	NIR_PASS_V(nir, lower_view_index);
	nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
				    MESA_SHADER_FRAGMENT);

	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

	/* This pass needs actual constants */
	nir_opt_constant_folding(nir);

	NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}


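/* Shader code is suballocated from 256 KiB slabs with a simple first-fit
 * scan: each slab keeps its resident shaders sorted by offset, and a new
 * shader is placed in the first 256-byte-aligned gap large enough to hold
 * it. A fresh slab is allocated when no existing slab has room.
 */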
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
					      0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}

void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5
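
/* radv_get_shader_binary_size() below reserves room for these markers, and
 * the upload path writes DEBUGGER_NUM_MARKERS copies of the invalid opcode
 * after the code so that UMR can tell where a shader ends.
 */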

static unsigned
radv_get_shader_binary_size(size_t code_size)
{
	return code_size + DEBUGGER_NUM_MARKERS * 4;
}

static void radv_postprocess_config(const struct radv_physical_device *pdevice,
				    const struct ac_shader_config *config_in,
				    const struct radv_shader_info *info,
				    gl_shader_stage stage,
				    struct ac_shader_config *config_out)
{
	bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
	unsigned vgpr_comp_cnt = 0;
	unsigned num_input_vgprs = info->num_input_vgprs;

	if (stage == MESA_SHADER_FRAGMENT) {
		num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
	}

	unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
	/* +3 for scratch wave offset and VCC */
	unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
	unsigned num_shared_vgprs = config_in->num_shared_vgprs;
	/* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
	assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
	       || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
	unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;

	*config_out = *config_in;
	config_out->num_vgprs = num_vgprs;
	config_out->num_sgprs = num_sgprs;
	config_out->num_shared_vgprs = num_shared_vgprs;

	/* Enable 64-bit and 16-bit denormals, because there is no performance
	 * cost.
	 *
	 * If denormals are enabled, all floating-point output modifiers are
	 * ignored.
	 *
	 * Don't enable denormals for 32-bit floats, because:
	 * - Floating-point output modifiers would be ignored by the hw.
	 * - Some opcodes don't support denormals, such as v_mad_f32. We would
	 *   have to stop using those.
	 * - GFX6 & GFX7 would be very slow.
	 */
	config_out->float_mode |= V_00B028_FP_64_DENORMS;

	config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
			    S_00B12C_SCRATCH_EN(scratch_enabled);

	if (!pdevice->use_ngg_streamout) {
		config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
				     S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
				     S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
				     S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
				     S_00B12C_SO_EN(!!info->so.num_outputs);
	}

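	/* The VGPRS field below is in allocation granules: 8 registers per
	 * granule for wave32 and 4 for wave64, biased by one granule. E.g.
	 * 36 VGPRs in wave64 encode as (36 - 1) / 4 = 8, i.e. nine granules.
	 */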
	config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
					   (info->wave_size == 32 ? 8 : 4)) |
			    S_00B848_DX10_CLAMP(1) |
			    S_00B848_FLOAT_MODE(config_out->float_mode);

	if (pdevice->rad_info.chip_class >= GFX10) {
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
	} else {
		config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
		config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
	}

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
		} else if (info->tes.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		} else {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			vgpr_comp_cnt = enable_prim_id ? 3 : 2;

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (pdevice->rad_info.chip_class >= GFX9) {
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			if (pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
			} else {
				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
			}
		} else {
			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_VERTEX:
		if (info->is_ngg) {
			config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		} else if (info->vs.as_ls) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* We need at least 2 components for LS.
			 * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
		} else if (info->vs.as_es) {
			assert(pdevice->rad_info.chip_class <= GFX8);
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
		} else {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID).
			 * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
			 * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
			 */
			if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
				vgpr_comp_cnt = 3;
			} else if (info->vs.export_prim_id) {
				vgpr_comp_cnt = 2;
			} else if (info->vs.needs_instance_id) {
				vgpr_comp_cnt = 1;
			} else {
				vgpr_comp_cnt = 0;
			}

			config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
			config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		}
		break;
	case MESA_SHADER_FRAGMENT:
		config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_GEOMETRY:
		config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
		break;
	case MESA_SHADER_COMPUTE:
		config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
				     S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
		config_out->rsrc2 |=
			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
						info->cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(config_in->lds_size);
		config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);

		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
	    (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
		gl_shader_stage es_stage = stage;
		if (stage == MESA_SHADER_GEOMETRY)
			es_stage = info->gs.es_type;

		/* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
		if (es_stage == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
		} else if (es_stage == MESA_SHADER_TESS_EVAL) {
			bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
			es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
		} else
			unreachable("Unexpected ES shader stage");

		bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
			info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
		if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3 || tes_triangles) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
				     S_00B228_WGP_MODE(1);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_LDS_SIZE(config_in->lds_size) |
				     S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = info->gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			/* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
			if (info->vs.needs_instance_id) {
				es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
			} else {
				es_vgpr_comp_cnt = 0;
			}
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (info->gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				     S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (pdevice->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}
}

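/* Create a shader variant from a compiled binary. RTLD binaries are ELFs
 * that still need relocation (and carry their config in ELF metadata);
 * legacy binaries already contain raw machine code plus a baked config.
 * Either way, the postprocessed config and the uploaded code end up in the
 * returned variant.
 */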
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   const struct radv_shader_binary *binary,
			   bool keep_shader_info)
{
	struct ac_shader_config config = {0};
	struct ac_rtld_binary rtld_binary = {0};
	struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	variant->ref_count = 1;

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct ac_rtld_symbol lds_symbols[2];
		unsigned num_lds_symbols = 0;
		const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
		size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

		if (device->physical_device->rad_info.chip_class >= GFX9 &&
		    (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
		    !binary->is_gs_copy_shader) {
			/* We add this symbol even on LLVM <= 8 to ensure that
			 * shader->config.lds_size is set correctly below.
			 */
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "esgs_ring";
			sym->size = binary->info.ngg_info.esgs_ring_size;
			sym->align = 64 * 1024;
		}

		if (binary->info.is_ngg &&
		    binary->stage == MESA_SHADER_GEOMETRY) {
			struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
			sym->name = "ngg_emit";
			sym->size = binary->info.ngg_info.ngg_emit_size * 4;
			sym->align = 4;
		}

		struct ac_rtld_open_info open_info = {
			.info = &device->physical_device->rad_info,
			.shader_type = binary->stage,
			.wave_size = binary->info.wave_size,
			.num_parts = 1,
			.elf_ptrs = &elf_data,
			.elf_sizes = &elf_size,
			.num_shared_lds_symbols = num_lds_symbols,
			.shared_lds_symbols = lds_symbols,
		};

		if (!ac_rtld_open(&rtld_binary, open_info)) {
			free(variant);
			return NULL;
		}

		if (!ac_rtld_read_config(&rtld_binary, &config)) {
			ac_rtld_close(&rtld_binary);
			free(variant);
			return NULL;
		}

		if (rtld_binary.lds_size > 0) {
			unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
		}

		variant->code_size = rtld_binary.rx_size;
		variant->exec_size = rtld_binary.exec_size;
	} else {
		assert(binary->type == RADV_BINARY_TYPE_LEGACY);
		config = ((struct radv_shader_binary_legacy *)binary)->config;
		variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
		variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
	}

	variant->info = binary->info;
	radv_postprocess_config(device->physical_device, &config, &binary->info,
				binary->stage, &variant->config);

	if (radv_device_use_secure_compile(device->instance)) {
		if (binary->type == RADV_BINARY_TYPE_RTLD)
			ac_rtld_close(&rtld_binary);

		return variant;
	}

	void *dest_ptr = radv_alloc_shader_memory(device, variant);

	if (binary->type == RADV_BINARY_TYPE_RTLD) {
		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
		struct ac_rtld_upload_info info = {
			.binary = &rtld_binary,
			.rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
			.rx_ptr = dest_ptr,
		};

		if (!ac_rtld_upload(&info)) {
			radv_shader_variant_destroy(device, variant);
			ac_rtld_close(&rtld_binary);
			return NULL;
		}

		if (keep_shader_info ||
		    (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
			const char *disasm_data;
			size_t disasm_size;
			if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
				radv_shader_variant_destroy(device, variant);
				ac_rtld_close(&rtld_binary);
				return NULL;
			}

			variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
			variant->disasm_string = malloc(disasm_size + 1);
			memcpy(variant->disasm_string, disasm_data, disasm_size);
			variant->disasm_string[disasm_size] = 0;
		}

		ac_rtld_close(&rtld_binary);
	} else {
		struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
		memcpy(dest_ptr, bin->data, bin->code_size);

		/* Add end-of-code markers for the UMR disassembler. */
		uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
		for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
			ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

		variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
		variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->ir_size)) : NULL;
	}
	return variant;
}

static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
		      int shader_count)
{
	char *data = NULL;
	char *ret = NULL;
	size_t size = 0;
	FILE *f = open_memstream(&data, &size);
	if (f) {
		for (int i = 0; i < shader_count; ++i)
			nir_print_shader(shaders[i], f);
		fclose(f);
	}

	ret = malloc(size + 1);
	if (ret) {
		memcpy(ret, data, size);
		ret[size] = 0;
	}
	free(data);
	return ret;
}

static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
		       struct radv_shader_module *module,
		       struct nir_shader * const *shaders,
		       int shader_count,
		       gl_shader_stage stage,
		       struct radv_shader_info *info,
		       struct radv_nir_compiler_options *options,
		       bool gs_copy_shader,
		       bool keep_shader_info,
		       bool use_aco,
		       struct radv_shader_binary **binary_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	struct radv_shader_binary *binary = NULL;

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_ir = keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;
	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;

	if (!use_aco || options->dump_shader || options->record_ir)
		ac_init_llvm_once();

	if (use_aco) {
		aco_compile_shader(shader_count, shaders, &binary, info, options);
		binary->info = *info;
	} else {
		enum ac_target_machine_options tm_options = 0;
		struct ac_llvm_compiler ac_llvm;
		bool thread_compiler;

		if (options->supports_spill)
			tm_options |= AC_TM_SUPPORTS_SPILL;
		if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
			tm_options |= AC_TM_SISCHED;
		if (options->check_ir)
			tm_options |= AC_TM_CHECK_IR;
		if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
			tm_options |= AC_TM_NO_LOAD_STORE_OPT;

		thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
		radv_init_llvm_compiler(&ac_llvm,
					thread_compiler,
					chip_family, tm_options,
					info->wave_size);

		if (gs_copy_shader) {
			assert(shader_count == 1);
			radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
						    info, options);
		} else {
			radv_compile_nir_shader(&ac_llvm, &binary, info,
						shaders, shader_count, options);
		}

		binary->info = *info;
		radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
	}

	struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
									  keep_shader_info);
	if (!variant) {
		free(binary);
		return NULL;
	}
	variant->aco_used = use_aco;

	if (options->dump_shader) {
		fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
	}

	if (keep_shader_info) {
		variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
		if (!gs_copy_shader && !module->nir) {
			variant->spirv = malloc(module->size);
			if (!variant->spirv) {
				free(variant);
				free(binary);
				return NULL;
			}

			memcpy(variant->spirv, module->data, module->size);
			variant->spirv_size = module->size;
		}
	}

	if (binary_out)
		*binary_out = binary;
	else
		free(binary);

	return variant;
}

struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
			    struct radv_shader_module *module,
			    struct nir_shader *const *shaders,
			    int shader_count,
			    struct radv_pipeline_layout *layout,
			    const struct radv_shader_variant_key *key,
			    struct radv_shader_info *info,
			    bool keep_shader_info,
			    bool use_aco,
			    struct radv_shader_binary **binary_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;
	options.robust_buffer_access = device->robust_buffer_access;

	return shader_variant_compile(device, module, shaders, shader_count,
				      shaders[shader_count - 1]->info.stage, info,
				      &options, false, keep_shader_info, use_aco, binary_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   struct radv_shader_info *info,
			   struct radv_shader_binary **binary_out,
			   bool keep_shader_info,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				      info, &options, true, keep_shader_info, false, binary_out);
}

void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	free(variant->spirv);
	free(variant->nir_string);
	free(variant->disasm_string);
	free(variant->ir_string);
	free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_info *info,
		     gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		if (info->vs.as_ls)
			return "Vertex Shader as LS";
		else if (info->vs.as_es)
			return "Vertex Shader as ES";
		else if (info->is_ngg)
			return "Vertex Shader as ESGS";
		else
			return "Vertex Shader as VS";
	case MESA_SHADER_TESS_CTRL:
		return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL:
		if (info->tes.as_es)
			return "Tessellation Evaluation Shader as ES";
		else if (info->is_ngg)
			return "Tessellation Evaluation Shader as ESGS";
		else
			return "Tessellation Evaluation Shader as VS";
	case MESA_SHADER_GEOMETRY:
		return "Geometry Shader";
	case MESA_SHADER_FRAGMENT:
		return "Pixel Shader";
	case MESA_SHADER_COMPUTE:
		return "Compute Shader";
	default:
		return "Unknown shader";
	}
}

unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
			    gl_shader_stage stage,
			    const unsigned *sizes)
{
	switch (stage) {
	case MESA_SHADER_TESS_CTRL:
		return chip_class >= GFX7 ? 128 : 64;
	case MESA_SHADER_GEOMETRY:
		return chip_class >= GFX9 ? 128 : 64;
	case MESA_SHADER_COMPUTE:
		break;
	default:
		return 0;
	}

	unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
	return max_workgroup_size;
}

unsigned
radv_get_max_waves(struct radv_device *device,
		   struct radv_shader_variant *variant,
		   gl_shader_stage stage)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	uint8_t wave_size = variant->info.wave_size;
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.ps.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, wave_size);
	}

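	/* Clamp waves/SIMD by register pressure. As an illustration, a wave64
	 * shader using 70 SGPRs on GFX8 allocates align(70, 16) = 80 SGPRs,
	 * so a SIMD with 800 physical SGPRs would allow at most 10 waves.
	 */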
	if (conf->num_sgprs) {
		unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
		max_simd_waves =
			MIN2(max_simd_waves,
			     device->physical_device->rad_info.num_physical_sgprs_per_simd /
			     sgprs);
	}

	if (conf->num_vgprs) {
		unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / vgprs);
	}

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	return max_simd_waves;
}

static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	struct ac_shader_config *conf = &variant->config;
	unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->exec_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}

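/* VK_AMD_shader_info entry point. It follows the usual two-call Vulkan size
 * negotiation; a sketch of the expected usage from an application:
 *
 *    size_t size;
 *    vkGetShaderInfoAMD(dev, pipe, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_STATISTICS_AMD, &size, NULL);
 *    VkShaderStatisticsInfoAMD stats;
 *    vkGetShaderInfoAMD(dev, pipe, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_STATISTICS_AMD, &size, &stats);
 *
 * VK_INCOMPLETE is returned when the caller's buffer is too small.
 */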
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return an error and no info for it. */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {0};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->info.cs.block_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}

		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}