radv: Disable lower_io_to_temporaries during deref changes.
[mesa.git] src/amd/vulkan/radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27
28 #include "util/mesa-sha1.h"
29 #include "util/u_atomic.h"
30 #include "radv_debug.h"
31 #include "radv_private.h"
32 #include "radv_shader.h"
33 #include "nir/nir.h"
34 #include "nir/nir_builder.h"
35 #include "spirv/nir_spirv.h"
36
37 #include <llvm-c/Core.h>
38 #include <llvm-c/TargetMachine.h>
39 #include <llvm-c/Support.h>
40
41 #include "sid.h"
42 #include "gfx9d.h"
43 #include "ac_binary.h"
44 #include "ac_llvm_util.h"
45 #include "ac_nir_to_llvm.h"
46 #include "vk_format.h"
47 #include "util/debug.h"
48 #include "ac_exp_param.h"
49
50 #include "util/string_buffer.h"
51
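/* Compiler options handed to NIR for every shader we translate. Each
 * .lower_* flag asks NIR to rewrite an operation the AMDGPU backend would
 * rather not see natively (e.g. flrp, pack/unpack, byte/word extracts),
 * so later passes and LLVM only have to deal with supported patterns.
 */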
static const struct nir_shader_compiler_options nir_options = {
        .vertex_id_zero_based = true,
        .lower_scmp = true,
        .lower_flrp32 = true,
        .lower_flrp64 = true,
        .lower_device_index_to_zero = true,
        .lower_fsat = true,
        .lower_fdiv = true,
        .lower_sub = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_unpack_snorm_2x16 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_unpack_unorm_2x16 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_ffma = true,
        .lower_fpow = true,
        .vs_inputs_dual_locations = true,
        .max_unroll_iterations = 32
};

VkResult radv_CreateShaderModule(
        VkDevice                                    _device,
        const VkShaderModuleCreateInfo*             pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkShaderModule*                             pShaderModule)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_shader_module *module;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
        assert(pCreateInfo->flags == 0);

        module = vk_alloc2(&device->alloc, pAllocator,
                           sizeof(*module) + pCreateInfo->codeSize, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (module == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        module->nir = NULL;
        module->size = pCreateInfo->codeSize;
        memcpy(module->data, pCreateInfo->pCode, module->size);

        _mesa_sha1_compute(module->data, module->size, module->sha1);

        *pShaderModule = radv_shader_module_to_handle(module);

        return VK_SUCCESS;
}

void radv_DestroyShaderModule(
        VkDevice                                    _device,
        VkShaderModule                              _module,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_shader_module, module, _module);

        if (!module)
                return;

        vk_free2(&device->alloc, pAllocator, module);
}

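/* Run NIR's optimization passes to a fixed point: keep iterating while any
 * pass reports progress, since e.g. copy propagation can expose new DCE
 * opportunities and vice versa. With optimize_conservatively we stop after
 * a single iteration, trading code quality for compile time.
 */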
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(shader, nir_lower_vars_to_ssa);
                NIR_PASS_V(shader, nir_lower_pack);
                NIR_PASS_V(shader, nir_lower_alu_to_scalar);
                NIR_PASS_V(shader, nir_lower_phis_to_scalar);

                NIR_PASS(progress, shader, nir_copy_prop);
                NIR_PASS(progress, shader, nir_opt_remove_phis);
                NIR_PASS(progress, shader, nir_opt_dce);
                if (nir_opt_trivial_continues(shader)) {
                        progress = true;
                        NIR_PASS(progress, shader, nir_copy_prop);
                        NIR_PASS(progress, shader, nir_opt_remove_phis);
                        NIR_PASS(progress, shader, nir_opt_dce);
                }
                NIR_PASS(progress, shader, nir_opt_if);
                NIR_PASS(progress, shader, nir_opt_dead_cf);
                NIR_PASS(progress, shader, nir_opt_cse);
                NIR_PASS(progress, shader, nir_opt_peephole_select, 8);
                NIR_PASS(progress, shader, nir_opt_algebraic);
                NIR_PASS(progress, shader, nir_opt_constant_folding);
                NIR_PASS(progress, shader, nir_opt_undef);
                NIR_PASS(progress, shader, nir_opt_conditional_discard);
                if (shader->options->max_unroll_iterations) {
                        NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
                }
        } while (progress && !optimize_conservatively);

        NIR_PASS(progress, shader, nir_opt_shrink_load);
        NIR_PASS(progress, shader, nir_opt_move_load_ubo);
}

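/* Translate a shader module into NIR. Meta shaders hand us NIR directly;
 * everything else goes through spirv_to_nir followed by the lowering
 * passes below, which leave the shader in the form the rest of the
 * compiler (and ac_nir_to_llvm) expects.
 */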
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
                           const char *entrypoint_name,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info,
                           const VkPipelineCreateFlags flags)
{
        nir_shader *nir;
        nir_function *entry_point;
        if (module->nir) {
                /* Some things such as our meta clear/blit code will give us a NIR
                 * shader directly. In that case, we ignore the SPIR-V entirely
                 * and use the NIR shader as-is.
                 */
                nir = module->nir;
                nir->options = &nir_options;
                nir_validate_shader(nir);

                assert(exec_list_length(&nir->functions) == 1);
                struct exec_node *node = exec_list_get_head(&nir->functions);
                entry_point = exec_node_data(nir_function, node, node);

                NIR_PASS_V(nir, nir_lower_deref_instrs, ~0);
        } else {
                uint32_t *spirv = (uint32_t *) module->data;
                assert(module->size % 4 == 0);

                if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
                        radv_print_spirv(spirv, module->size, stderr);

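                /* Resolve VkSpecializationInfo into the flat id/value array
                 * that spirv_to_nir consumes: each map entry points at an
                 * offset within pData, and the entry's size selects between
                 * the 32-bit and 64-bit payload.
                 */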
                uint32_t num_spec_entries = 0;
                struct nir_spirv_specialization *spec_entries = NULL;
                if (spec_info && spec_info->mapEntryCount > 0) {
                        num_spec_entries = spec_info->mapEntryCount;
                        spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
                        for (uint32_t i = 0; i < num_spec_entries; i++) {
                                VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
                                const void *data = spec_info->pData + entry.offset;
                                assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

                                spec_entries[i].id = spec_info->pMapEntries[i].constantID;
                                /* The width of this constant is given by the per-entry
                                 * size, not by the size of the whole pData blob.
                                 */
                                if (entry.size == 8)
                                        spec_entries[i].data64 = *(const uint64_t *)data;
                                else
                                        spec_entries[i].data32 = *(const uint32_t *)data;
                        }
                }
                const struct spirv_to_nir_options spirv_options = {
                        .caps = {
                                .device_group = true,
                                .draw_parameters = true,
                                .float64 = true,
                                .image_read_without_format = true,
                                .image_write_without_format = true,
                                .tessellation = true,
                                .int64 = true,
                                .multiview = true,
                                .subgroup_ballot = true,
                                .subgroup_basic = true,
                                .subgroup_quad = true,
                                .subgroup_shuffle = true,
                                .subgroup_vote = true,
                                .variable_pointers = true,
                                .gcn_shader = true,
                                .trinary_minmax = true,
                                .shader_viewport_index_layer = true,
                                .descriptor_array_dynamic_indexing = true,
                                .runtime_descriptor_array = true,
                        },
                };
                entry_point = spirv_to_nir(spirv, module->size / 4,
                                           spec_entries, num_spec_entries,
                                           stage, entrypoint_name,
                                           &spirv_options, &nir_options);
                nir = entry_point->shader;
                assert(nir->info.stage == stage);
                nir_validate_shader(nir);

                free(spec_entries);

                /* We have to lower away local constant initializers right before we
                 * inline functions. That way they get properly initialized at the top
                 * of the function and not at the top of its caller.
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
                NIR_PASS_V(nir, nir_lower_returns);
                NIR_PASS_V(nir, nir_inline_functions);
                NIR_PASS_V(nir, nir_copy_prop);

                /* Pick off the single entrypoint that we want */
                foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
                        if (func != entry_point)
                                exec_node_remove(&func->node);
                }
                assert(exec_list_length(&nir->functions) == 1);
                entry_point->name = ralloc_strdup(entry_point, "main");

                NIR_PASS_V(nir, nir_lower_deref_instrs, ~0);

                /* Make sure we lower constant initializers on output variables so that
                 * nir_remove_dead_variables below sees the corresponding stores
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

                NIR_PASS_V(nir, nir_remove_dead_variables,
                           nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

                /* Now that we've deleted all but the main function, we can go ahead and
                 * lower the rest of the constant initializers.
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

                /* Split member structs. We do this before lower_io_to_temporaries so that
                 * it doesn't lower system values to temporaries by accident.
                 */
                NIR_PASS_V(nir, nir_split_var_copies);
                NIR_PASS_V(nir, nir_split_per_member_structs);

                NIR_PASS_V(nir, nir_lower_system_values);
                NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
        }

        /* Vulkan uses the separate-shader linking model */
        nir->info.separate_shader = true;

        nir_shader_gather_info(nir, entry_point->impl);

        static const nir_lower_tex_options tex_options = {
                .lower_txp = ~0,
        };

        nir_lower_tex(nir, &tex_options);

        nir_lower_vars_to_ssa(nir);

        /* Temporarily disabled during the deref-instruction changes;
         * nir_lower_io_to_temporaries has not been converted yet.
         */
#if 0
        if (nir->info.stage == MESA_SHADER_VERTEX ||
            nir->info.stage == MESA_SHADER_GEOMETRY) {
                NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                           nir_shader_get_entrypoint(nir), true, true);
        } else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
                   nir->info.stage == MESA_SHADER_FRAGMENT) {
                NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                           nir_shader_get_entrypoint(nir), true, false);
        }
#endif

        nir_split_var_copies(nir);
        nir_lower_var_copies(nir);

        nir_lower_global_vars_to_local(nir);
        nir_remove_dead_variables(nir, nir_var_local);
        nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
                        .subgroup_size = 64,
                        .ballot_bit_size = 64,
                        .lower_to_scalar = 1,
                        .lower_subgroup_masks = 1,
                        .lower_shuffle = 1,
                        .lower_shuffle_to_32bit = 1,
                        .lower_vote_eq_to_ballot = 1,
                });

        if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
                radv_optimize_nir(nir, false);

        /* Indirect lowering must be called after the radv_optimize_nir() loop
         * has been called at least once. Otherwise indirect lowering can
         * bloat the instruction count of the loop and cause it to be
         * considered too large for unrolling.
         */
        ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
        radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT);

        return nir;
}

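/* Upload memory for shader code comes from 256 KiB slabs of VRAM. The
 * allocator below walks the shaders already placed in each slab in offset
 * order and does a first-fit scan: a shader fits either in the gap before
 * an existing shader or in the tail of the slab, at 256-byte alignment.
 * For example, with shaders at [0, 4K) and [8K, 12K), a 2 KiB shader
 * lands at offset 4K. If no slab has room, a fresh slab is allocated.
 */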
void *
radv_alloc_shader_memory(struct radv_device *device,
                         struct radv_shader_variant *shader)
{
        mtx_lock(&device->shader_slab_mutex);
        list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
                uint64_t offset = 0;
                list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
                        if (s->bo_offset - offset >= shader->code_size) {
                                shader->bo = slab->bo;
                                shader->bo_offset = offset;
                                list_addtail(&shader->slab_list, &s->slab_list);
                                mtx_unlock(&device->shader_slab_mutex);
                                return slab->ptr + offset;
                        }
                        offset = align_u64(s->bo_offset + s->code_size, 256);
                }
                if (slab->size - offset >= shader->code_size) {
                        shader->bo = slab->bo;
                        shader->bo_offset = offset;
                        list_addtail(&shader->slab_list, &slab->shaders);
                        mtx_unlock(&device->shader_slab_mutex);
                        return slab->ptr + offset;
                }
        }

        mtx_unlock(&device->shader_slab_mutex);
        struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

        /* Grow the slab if a single shader is larger than the default size. */
        slab->size = MAX2(256 * 1024, shader->code_size);
        slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
                                             RADEON_DOMAIN_VRAM,
                                             RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                             (device->physical_device->cpdma_prefetch_writes_memory ?
                                                     0 : RADEON_FLAG_READ_ONLY));
        slab->ptr = (char*)device->ws->buffer_map(slab->bo);
        list_inithead(&slab->shaders);

        mtx_lock(&device->shader_slab_mutex);
        list_add(&slab->slabs, &device->shader_slabs);

        shader->bo = slab->bo;
        shader->bo_offset = 0;
        list_add(&shader->slab_list, &slab->shaders);
        mtx_unlock(&device->shader_slab_mutex);
        return slab->ptr;
}

void
radv_destroy_shader_slabs(struct radv_device *device)
{
        list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
                device->ws->buffer_destroy(slab->bo);
                free(slab);
        }
        mtx_destroy(&device->shader_slab_mutex);
}

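/* Derive the SPI shader resource registers (RSRC1/RSRC2) from the compiled
 * binary's config and upload the code. The VGPR component counts tell the
 * hardware how many input VGPRs to preload for each stage (e.g. vertex and
 * instance IDs for VS, tess coordinates for TES).
 */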
static void
radv_fill_shader_variant(struct radv_device *device,
                         struct radv_shader_variant *variant,
                         struct ac_shader_binary *binary,
                         gl_shader_stage stage)
{
        bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
        struct radv_shader_info *info = &variant->info.info;
        unsigned vgpr_comp_cnt = 0;

        variant->code_size = binary->code_size;
        variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
                         S_00B12C_SCRATCH_EN(scratch_enabled);

        variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
                         S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
                         S_00B848_DX10_CLAMP(1) |
                         S_00B848_FLOAT_MODE(variant->config.float_mode);

        switch (stage) {
        case MESA_SHADER_TESS_EVAL:
                vgpr_comp_cnt = 3;
                variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
                break;
        case MESA_SHADER_TESS_CTRL:
                if (device->physical_device->rad_info.chip_class >= GFX9) {
                        vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                } else {
                        variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
                }
                break;
        case MESA_SHADER_VERTEX:
        case MESA_SHADER_GEOMETRY:
                vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                break;
        case MESA_SHADER_FRAGMENT:
                break;
        case MESA_SHADER_COMPUTE:
                variant->rsrc2 |=
                        S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
                        S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
                        S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
                        S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
                                                info->cs.uses_thread_id[1] ? 1 : 0) |
                        S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
                        S_00B84C_LDS_SIZE(variant->config.lds_size);
                break;
        default:
                unreachable("unsupported shader type");
                break;
        }

        if (device->physical_device->rad_info.chip_class >= GFX9 &&
            stage == MESA_SHADER_GEOMETRY) {
                unsigned es_type = variant->info.gs.es_type;
                unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

                if (es_type == MESA_SHADER_VERTEX) {
                        es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                } else if (es_type == MESA_SHADER_TESS_EVAL) {
                        es_vgpr_comp_cnt = 3;
                } else {
                        unreachable("invalid shader ES type");
                }

                /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
                 * VGPR[0:4] are always loaded.
                 */
                if (info->uses_invocation_id) {
                        gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
                } else if (info->uses_prim_id) {
                        gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
                } else if (variant->info.gs.vertices_in >= 3) {
                        gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
                } else {
                        gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
                }

                variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
                variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                                  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
        } else if (device->physical_device->rad_info.chip_class >= GFX9 &&
                   stage == MESA_SHADER_TESS_CTRL) {
                variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
        } else {
                variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
        }

        void *ptr = radv_alloc_shader_memory(device, variant);
        memcpy(ptr, binary->code, binary->code_size);
}

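/* One-time global LLVM initialization for the AMDGPU target. This must only
 * run once per process, so callers go through call_once() below.
 */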
static void radv_init_llvm_target(void)
{
        LLVMInitializeAMDGPUTargetInfo();
        LLVMInitializeAMDGPUTarget();
        LLVMInitializeAMDGPUTargetMC();
        LLVMInitializeAMDGPUAsmPrinter();

        /* For inline assembly. */
        LLVMInitializeAMDGPUAsmParser();

        /* Workaround for bug in llvm 4.0 that causes image intrinsics
         * to disappear.
         * https://reviews.llvm.org/D26348
         *
         * Workaround for bug in llvm that causes the GPU to hang in presence
         * of nested loops because there is an exec mask issue. The proper
         * solution is to fix LLVM but this might require a bunch of work.
         * https://bugs.llvm.org/show_bug.cgi?id=37744
         *
         * "mesa" is the prefix for error messages.
         */
        const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
                                "-amdgpu-skip-threshold=1" };
        LLVMParseCommandLineOptions(3, argv, NULL);
}

static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;

static LLVMTargetRef radv_get_llvm_target(const char *triple)
{
        LLVMTargetRef target = NULL;
        char *err_message = NULL;

        call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);

        if (LLVMGetTargetFromTriple(triple, &target, &err_message)) {
                fprintf(stderr, "Cannot find target for triple %s ", triple);
                if (err_message) {
                        fprintf(stderr, "%s\n", err_message);
                }
                LLVMDisposeMessage(err_message);
                return NULL;
        }
        return target;
}

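/* Build an LLVM target machine for the given GPU. The triple selects the
 * amdgcn backend (the mesa3d OS triple is what enables spill support), and
 * the feature string toggles backend behaviour such as the SI scheduler or
 * XNACK based on the requested tm_options.
 */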
static LLVMTargetMachineRef radv_create_target_machine(enum radeon_family family,
                                                       enum ac_target_machine_options tm_options,
                                                       const char **out_triple)
{
        assert(family >= CHIP_TAHITI);
        char features[256];
        const char *triple = (tm_options & AC_TM_SUPPORTS_SPILL) ? "amdgcn-mesa-mesa3d" : "amdgcn--";
        LLVMTargetRef target = radv_get_llvm_target(triple);

        snprintf(features, sizeof(features),
                 "+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s%s%s",
                 tm_options & AC_TM_SISCHED ? ",+si-scheduler" : "",
                 tm_options & AC_TM_FORCE_ENABLE_XNACK ? ",+xnack" : "",
                 tm_options & AC_TM_FORCE_DISABLE_XNACK ? ",-xnack" : "",
                 tm_options & AC_TM_PROMOTE_ALLOCA_TO_SCRATCH ? ",-promote-alloca" : "");

        LLVMTargetMachineRef tm = LLVMCreateTargetMachine(
                                        target,
                                        triple,
                                        ac_get_llvm_processor_name(family),
                                        features,
                                        LLVMCodeGenLevelDefault,
                                        LLVMRelocDefault,
                                        LLVMCodeModelDefault);

        if (out_triple)
                *out_triple = triple;
        return tm;
}

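/* Common compilation path for both regular shaders and the GS copy shader:
 * fill in the per-device compiler options, run the NIR -> LLVM -> machine
 * code pipeline, then upload the result into a shader slab. The caller
 * either takes ownership of binary.code via code_out, or it is freed here.
 */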
static struct radv_shader_variant *
shader_variant_create(struct radv_device *device,
                      struct radv_shader_module *module,
                      struct nir_shader * const *shaders,
                      int shader_count,
                      gl_shader_stage stage,
                      struct radv_nir_compiler_options *options,
                      bool gs_copy_shader,
                      void **code_out,
                      unsigned *code_size_out)
{
        enum radeon_family chip_family = device->physical_device->rad_info.family;
        enum ac_target_machine_options tm_options = 0;
        struct radv_shader_variant *variant;
        struct ac_shader_binary binary;
        LLVMTargetMachineRef tm;

        variant = calloc(1, sizeof(struct radv_shader_variant));
        if (!variant)
                return NULL;

        options->family = chip_family;
        options->chip_class = device->physical_device->rad_info.chip_class;
        options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
        options->dump_preoptir = options->dump_shader &&
                                 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
        options->record_llvm_ir = device->keep_shader_info;
        options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
        options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
        options->address32_hi = device->physical_device->rad_info.address32_hi;

        if (options->supports_spill)
                tm_options |= AC_TM_SUPPORTS_SPILL;
        if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
                tm_options |= AC_TM_SISCHED;
        tm = radv_create_target_machine(chip_family, tm_options, NULL);

        if (gs_copy_shader) {
                assert(shader_count == 1);
                radv_compile_gs_copy_shader(tm, *shaders, &binary,
                                            &variant->config, &variant->info,
                                            options);
        } else {
                radv_compile_nir_shader(tm, &binary, &variant->config,
                                        &variant->info, shaders, shader_count,
                                        options);
        }

        LLVMDisposeTargetMachine(tm);

        radv_fill_shader_variant(device, variant, &binary, stage);

        if (code_out) {
                *code_out = binary.code;
                *code_size_out = binary.code_size;
        } else
                free(binary.code);
        free(binary.config);
        free(binary.rodata);
        free(binary.global_symbol_offsets);
        free(binary.relocs);
        variant->ref_count = 1;

        if (device->keep_shader_info) {
                variant->disasm_string = binary.disasm_string;
                variant->llvm_ir_string = binary.llvm_ir_string;
                if (!gs_copy_shader && !module->nir) {
                        variant->nir = *shaders;
                        variant->spirv = (uint32_t *)module->data;
                        variant->spirv_size = module->size;
                }
        } else {
                free(binary.disasm_string);
        }

        return variant;
}

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
                           struct radv_shader_module *module,
                           struct nir_shader *const *shaders,
                           int shader_count,
                           struct radv_pipeline_layout *layout,
                           const struct radv_shader_variant_key *key,
                           void **code_out,
                           unsigned *code_size_out)
{
        struct radv_nir_compiler_options options = {0};

        options.layout = layout;
        if (key)
                options.key = *key;

        options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
        options.supports_spill = true;

        return shader_variant_create(device, module, shaders, shader_count,
                                     shaders[shader_count - 1]->info.stage,
                                     &options, false, code_out, code_size_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
                           struct nir_shader *shader,
                           void **code_out,
                           unsigned *code_size_out,
                           bool multiview)
{
        struct radv_nir_compiler_options options = {0};

        options.key.has_multiview_view_index = multiview;

        return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
                                     &options, true, code_out, code_size_out);
}

void
radv_shader_variant_destroy(struct radv_device *device,
                            struct radv_shader_variant *variant)
{
        if (!p_atomic_dec_zero(&variant->ref_count))
                return;

        mtx_lock(&device->shader_slab_mutex);
        list_del(&variant->slab_list);
        mtx_unlock(&device->shader_slab_mutex);

        ralloc_free(variant->nir);
        free(variant->disasm_string);
        free(variant->llvm_ir_string);
        free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
{
        switch (stage) {
        case MESA_SHADER_VERTEX:
                return var->info.vs.as_ls ? "Vertex Shader as LS" :
                       var->info.vs.as_es ? "Vertex Shader as ES" :
                       "Vertex Shader as VS";
        case MESA_SHADER_GEOMETRY:
                return "Geometry Shader";
        case MESA_SHADER_FRAGMENT:
                return "Pixel Shader";
        case MESA_SHADER_COMPUTE:
                return "Compute Shader";
        case MESA_SHADER_TESS_CTRL:
                return "Tessellation Control Shader";
        case MESA_SHADER_TESS_EVAL:
                return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" :
                       "Tessellation Evaluation Shader as VS";
        default:
                return "Unknown shader";
        }
}

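/* Estimate how many waves per SIMD this shader can run concurrently. Each
 * register and LDS limit is applied as a MIN2(). As a rough worked example:
 * a shader using 100 of the 256 physical VGPRs is limited to 256 / 100 = 2
 * waves per SIMD by registers alone, regardless of the device's nominal
 * maximum (typically 10).
 */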
static void
generate_shader_stats(struct radv_device *device,
                      struct radv_shader_variant *variant,
                      gl_shader_stage stage,
                      struct _mesa_string_buffer *buf)
{
        unsigned lds_increment = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
        struct ac_shader_config *conf;
        unsigned max_simd_waves;
        unsigned lds_per_wave = 0;

        max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);

        conf = &variant->config;

        if (stage == MESA_SHADER_FRAGMENT) {
                lds_per_wave = conf->lds_size * lds_increment +
                               align(variant->info.fs.num_interp * 48,
                                     lds_increment);
        }

        if (conf->num_sgprs)
                max_simd_waves =
                        MIN2(max_simd_waves,
                             radv_get_num_physical_sgprs(device->physical_device) / conf->num_sgprs);

        if (conf->num_vgprs)
                max_simd_waves =
                        MIN2(max_simd_waves,
                             RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

        /* LDS is 64KB per CU (4 SIMDs), which is 16KB per SIMD that PS
         * can use.
         */
        if (lds_per_wave)
                max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

        if (stage == MESA_SHADER_FRAGMENT) {
                _mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
                                           "SPI_PS_INPUT_ADDR = 0x%04x\n"
                                           "SPI_PS_INPUT_ENA = 0x%04x\n",
                                           conf->spi_ps_input_addr, conf->spi_ps_input_ena);
        }

        _mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
                                   "SGPRS: %d\n"
                                   "VGPRS: %d\n"
                                   "Spilled SGPRs: %d\n"
                                   "Spilled VGPRs: %d\n"
                                   "PrivMem VGPRS: %d\n"
                                   "Code Size: %d bytes\n"
                                   "LDS: %d blocks\n"
                                   "Scratch: %d bytes per wave\n"
                                   "Max Waves: %d\n"
                                   "********************\n\n\n",
                                   conf->num_sgprs, conf->num_vgprs,
                                   conf->spilled_sgprs, conf->spilled_vgprs,
                                   variant->info.private_mem_vgprs, variant->code_size,
                                   conf->lds_size, conf->scratch_bytes_per_wave,
                                   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
                       struct radv_shader_variant *variant,
                       gl_shader_stage stage,
                       FILE *file)
{
        struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

        generate_shader_stats(device, variant, stage, buf);

        fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
        fprintf(file, "%s", buf->buf);

        _mesa_string_buffer_destroy(buf);
}

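/* Implements VK_AMD_shader_info. Like other Vulkan getters this uses the
 * two-call idiom: calling with pInfo == NULL returns the required size in
 * *pInfoSize, and a second call with a buffer copies out at most that many
 * bytes, returning VK_INCOMPLETE if the buffer was too small.
 */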
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
                      VkPipeline _pipeline,
                      VkShaderStageFlagBits shaderStage,
                      VkShaderInfoTypeAMD infoType,
                      size_t* pInfoSize,
                      void* pInfo)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
        gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
        struct radv_shader_variant *variant = pipeline->shaders[stage];
        struct _mesa_string_buffer *buf;
        VkResult result = VK_SUCCESS;

        /* Spec doesn't indicate what to do if the stage is invalid, so just
         * return an error for this. */
        if (!variant)
                return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

        switch (infoType) {
        case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
                if (!pInfo) {
                        *pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
                } else {
                        unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
                        struct ac_shader_config *conf = &variant->config;

                        VkShaderStatisticsInfoAMD statistics = {0};
                        statistics.shaderStageMask = shaderStage;
                        statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
                        statistics.numPhysicalSgprs = radv_get_num_physical_sgprs(device->physical_device);
                        statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

                        if (stage == MESA_SHADER_COMPUTE) {
                                unsigned *local_size = variant->nir->info.cs.local_size;
                                unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

                                statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
                                                               ceil((double)workgroup_size / statistics.numPhysicalVgprs);

                                statistics.computeWorkGroupSize[0] = local_size[0];
                                statistics.computeWorkGroupSize[1] = local_size[1];
                                statistics.computeWorkGroupSize[2] = local_size[2];
                        } else {
                                statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
                        }

                        statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
                        statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
                        statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
                        statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
                        statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

                        size_t size = *pInfoSize;
                        *pInfoSize = sizeof(statistics);

                        memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

                        if (size < *pInfoSize)
                                result = VK_INCOMPLETE;
                }

                break;
        case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
                buf = _mesa_string_buffer_create(NULL, 1024);

                _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
                _mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
                generate_shader_stats(device, variant, stage, buf);

                /* Need to include the null terminator. */
                size_t length = buf->length + 1;

                if (!pInfo) {
                        *pInfoSize = length;
                } else {
                        size_t size = *pInfoSize;
                        *pInfoSize = length;

                        memcpy(pInfo, buf->buf, MIN2(size, length));

                        if (size < length)
                                result = VK_INCOMPLETE;
                }

                _mesa_string_buffer_destroy(buf);
                break;
        default:
                /* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
                result = VK_ERROR_FEATURE_NOT_PRESENT;
                break;
        }

        return result;
}