src/amd/vulkan/radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "util/string_buffer.h"

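/* NIR compiler options shared by spirv_to_nir() and our internal NIR
 * shaders. These request lowering of operations that the AMD LLVM backend
 * either does not support natively or prefers in a simpler form.
 */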
static const struct nir_shader_compiler_options nir_options = {
	.vertex_id_zero_based = true,
	.lower_scmp = true,
	.lower_flrp16 = true,
	.lower_flrp32 = true,
	.lower_flrp64 = true,
	.lower_device_index_to_zero = true,
	.lower_fsat = true,
	.lower_fdiv = true,
	.lower_sub = true,
	.lower_pack_snorm_2x16 = true,
	.lower_pack_snorm_4x8 = true,
	.lower_pack_unorm_2x16 = true,
	.lower_pack_unorm_4x8 = true,
	.lower_unpack_snorm_2x16 = true,
	.lower_unpack_snorm_4x8 = true,
	.lower_unpack_unorm_2x16 = true,
	.lower_unpack_unorm_4x8 = true,
	.lower_extract_byte = true,
	.lower_extract_word = true,
	.lower_ffma = true,
	.lower_fpow = true,
	.lower_mul_2x32_64 = true,
	.max_unroll_iterations = 32
};

VkResult radv_CreateShaderModule(
	VkDevice                                    _device,
	const VkShaderModuleCreateInfo*             pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkShaderModule*                             pShaderModule)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_shader_module *module;

	assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
	assert(pCreateInfo->flags == 0);

	module = vk_alloc2(&device->alloc, pAllocator,
			   sizeof(*module) + pCreateInfo->codeSize, 8,
			   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (module == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	module->nir = NULL;
	module->size = pCreateInfo->codeSize;
	memcpy(module->data, pCreateInfo->pCode, module->size);

	_mesa_sha1_compute(module->data, module->size, module->sha1);

	*pShaderModule = radv_shader_module_to_handle(module);

	return VK_SUCCESS;
}

void radv_DestroyShaderModule(
	VkDevice                                    _device,
	VkShaderModule                              _module,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_shader_module, module, _module);

	if (!module)
		return;

	vk_free2(&device->alloc, pAllocator, module);
}

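/* Run the NIR optimization loop until it stops making progress (or only a
 * single iteration when optimize_conservatively is set). allow_copies must
 * only be true for the first invocation, because later calls assume that
 * copy_deref instructions have already been lowered away.
 */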
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
		  bool allow_copies)
{
	bool progress;
	unsigned lower_flrp =
		(shader->options->lower_flrp16 ? 16 : 0) |
		(shader->options->lower_flrp32 ? 32 : 0) |
		(shader->options->lower_flrp64 ? 64 : 0);

	do {
		progress = false;

		NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
		NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

		NIR_PASS_V(shader, nir_lower_vars_to_ssa);
		NIR_PASS_V(shader, nir_lower_pack);

		if (allow_copies) {
			/* Only run this pass in the first call to
			 * radv_optimize_nir. Later calls assume that we've
			 * lowered away any copy_deref instructions and we
			 * don't want to introduce any more.
			 */
			NIR_PASS(progress, shader, nir_opt_find_array_copies);
		}

		NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
		NIR_PASS(progress, shader, nir_opt_dead_write_vars);

		NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL);
		NIR_PASS_V(shader, nir_lower_phis_to_scalar);

		NIR_PASS(progress, shader, nir_copy_prop);
		NIR_PASS(progress, shader, nir_opt_remove_phis);
		NIR_PASS(progress, shader, nir_opt_dce);
		if (nir_opt_trivial_continues(shader)) {
			progress = true;
			NIR_PASS(progress, shader, nir_copy_prop);
			NIR_PASS(progress, shader, nir_opt_remove_phis);
			NIR_PASS(progress, shader, nir_opt_dce);
		}
		NIR_PASS(progress, shader, nir_opt_if, true);
		NIR_PASS(progress, shader, nir_opt_dead_cf);
		NIR_PASS(progress, shader, nir_opt_cse);
		NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
		NIR_PASS(progress, shader, nir_opt_constant_folding);
		NIR_PASS(progress, shader, nir_opt_algebraic);

		if (lower_flrp != 0) {
			bool lower_flrp_progress = false;
			NIR_PASS(lower_flrp_progress,
				 shader,
				 nir_lower_flrp,
				 lower_flrp,
				 false /* always_precise */,
				 shader->options->lower_ffma);
			if (lower_flrp_progress) {
				NIR_PASS(progress, shader,
					 nir_opt_constant_folding);
				progress = true;
			}

			/* Nothing should rematerialize any flrps, so we only
			 * need to do this lowering once.
			 */
			lower_flrp = 0;
		}

		NIR_PASS(progress, shader, nir_opt_undef);
		NIR_PASS(progress, shader, nir_opt_conditional_discard);
		if (shader->options->max_unroll_iterations) {
			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
		}
	} while (progress && !optimize_conservatively);

	NIR_PASS(progress, shader, nir_opt_shrink_load);
	NIR_PASS(progress, shader, nir_opt_move_load_ubo);
}

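/* Translate a SPIR-V shader module (or an internal, already-built NIR
 * shader) into an optimized NIR shader ready for the AMD backend. This
 * handles specialization constants, entrypoint selection, and all of the
 * stage-independent lowering passes.
 */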
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
			   struct radv_shader_module *module,
			   const char *entrypoint_name,
			   gl_shader_stage stage,
			   const VkSpecializationInfo *spec_info,
			   const VkPipelineCreateFlags flags,
			   const struct radv_pipeline_layout *layout)
{
	nir_shader *nir;
	if (module->nir) {
		/* Some things such as our meta clear/blit code will give us a
		 * NIR shader directly. In that case, we ignore the SPIR-V
		 * entirely and just use the NIR shader.
		 */
		nir = module->nir;
		nir->options = &nir_options;
		nir_validate_shader(nir, "in internal shader");

		assert(exec_list_length(&nir->functions) == 1);
	} else {
		uint32_t *spirv = (uint32_t *) module->data;
		assert(module->size % 4 == 0);

		if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
			radv_print_spirv(spirv, module->size, stderr);

		uint32_t num_spec_entries = 0;
		struct nir_spirv_specialization *spec_entries = NULL;
		if (spec_info && spec_info->mapEntryCount > 0) {
			num_spec_entries = spec_info->mapEntryCount;
			spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
			for (uint32_t i = 0; i < num_spec_entries; i++) {
				VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
				const void *data = spec_info->pData + entry.offset;
				assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

				spec_entries[i].id = spec_info->pMapEntries[i].constantID;
				/* Select 32 vs. 64 bits based on the size of
				 * this map entry, not the size of the whole
				 * pData blob.
				 */
				if (entry.size == 8)
					spec_entries[i].data64 = *(const uint64_t *)data;
				else
					spec_entries[i].data32 = *(const uint32_t *)data;
			}
		}
		const struct spirv_to_nir_options spirv_options = {
			.lower_ubo_ssbo_access_to_offsets = true,
			.caps = {
				.amd_gcn_shader = true,
				.amd_shader_ballot = device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT,
				.amd_trinary_minmax = true,
				.derivative_group = true,
				.descriptor_array_dynamic_indexing = true,
				.descriptor_array_non_uniform_indexing = true,
				.descriptor_indexing = true,
				.device_group = true,
				.draw_parameters = true,
				.float16 = true,
				.float64 = true,
				.geometry_streams = true,
				.image_read_without_format = true,
				.image_write_without_format = true,
				.int8 = true,
				.int16 = true,
				.int64 = true,
				.int64_atomics = true,
				.multiview = true,
				.physical_storage_buffer_address = true,
				.runtime_descriptor_array = true,
				.shader_viewport_index_layer = true,
				.stencil_export = true,
				.storage_8bit = true,
				.storage_16bit = true,
				.storage_image_ms = true,
				.subgroup_arithmetic = true,
				.subgroup_ballot = true,
				.subgroup_basic = true,
				.subgroup_quad = true,
				.subgroup_shuffle = true,
				.subgroup_vote = true,
				.tessellation = true,
				.transform_feedback = true,
				.variable_pointers = true,
			},
			.ubo_addr_format = nir_address_format_32bit_index_offset,
			.ssbo_addr_format = nir_address_format_32bit_index_offset,
			.phys_ssbo_addr_format = nir_address_format_64bit_global,
			.push_const_addr_format = nir_address_format_logical,
			.shared_addr_format = nir_address_format_32bit_offset,
		};
		nir = spirv_to_nir(spirv, module->size / 4,
				   spec_entries, num_spec_entries,
				   stage, entrypoint_name,
				   &spirv_options, &nir_options);
		assert(nir->info.stage == stage);
		nir_validate_shader(nir, "after spirv_to_nir");

		free(spec_entries);

		/* We have to lower away local constant initializers right before we
		 * inline functions. That way they get properly initialized at the top
		 * of the function and not at the top of its caller.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
		NIR_PASS_V(nir, nir_lower_returns);
		NIR_PASS_V(nir, nir_inline_functions);
		NIR_PASS_V(nir, nir_opt_deref);

		/* Pick off the single entrypoint that we want */
		foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
			if (func->is_entrypoint)
				func->name = ralloc_strdup(func, "main");
			else
				exec_node_remove(&func->node);
		}
		assert(exec_list_length(&nir->functions) == 1);

		/* Make sure we lower constant initializers on output variables so that
		 * nir_remove_dead_variables below sees the corresponding stores
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

		/* Now that we've deleted all but the main function, we can go ahead and
		 * lower the rest of the constant initializers.
		 */
		NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

		/* Split member structs. We do this before lower_io_to_temporaries so that
		 * it doesn't lower system values to temporaries by accident.
		 */
		NIR_PASS_V(nir, nir_split_var_copies);
		NIR_PASS_V(nir, nir_split_per_member_structs);

		NIR_PASS_V(nir, nir_remove_dead_variables,
			   nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

		NIR_PASS_V(nir, nir_lower_system_values);
		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
	}

	/* Vulkan uses the separate-shader linking model */
	nir->info.separate_shader = true;

	nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

	static const nir_lower_tex_options tex_options = {
		.lower_txp = ~0,
		.lower_tg4_offsets = true,
	};

	nir_lower_tex(nir, &tex_options);

	nir_lower_vars_to_ssa(nir);

	if (nir->info.stage == MESA_SHADER_VERTEX ||
	    nir->info.stage == MESA_SHADER_GEOMETRY) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, true);
	} else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
		   nir->info.stage == MESA_SHADER_FRAGMENT) {
		NIR_PASS_V(nir, nir_lower_io_to_temporaries,
			   nir_shader_get_entrypoint(nir), true, false);
	}

	nir_split_var_copies(nir);

	nir_lower_global_vars_to_local(nir);
	nir_remove_dead_variables(nir, nir_var_function_temp);
	nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
			.subgroup_size = 64,
			.ballot_bit_size = 64,
			.lower_to_scalar = 1,
			.lower_subgroup_masks = 1,
			.lower_shuffle = 1,
			.lower_shuffle_to_32bit = 1,
			.lower_vote_eq_to_ballot = 1,
		});

	nir_lower_load_const_to_scalar(nir);

	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
		radv_optimize_nir(nir, false, true);

	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
	 * to remove any copies introduced by nir_opt_find_array_copies().
	 */
	nir_lower_var_copies(nir);

	/* Indirect lowering must be called after the radv_optimize_nir() loop
	 * has been called at least once. Otherwise indirect lowering can
	 * bloat the instruction count of the loop and cause it to be
	 * considered too large for unrolling.
	 */
	ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
	radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

	return nir;
}

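/* Upload memory for a shader variant using a simple first-fit allocator
 * over a list of 256 KiB slabs. Shaders within a slab are kept sorted by
 * offset and aligned to 256 bytes; a new slab is created when no existing
 * hole is large enough.
 */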
void *
radv_alloc_shader_memory(struct radv_device *device,
			 struct radv_shader_variant *shader)
{
	mtx_lock(&device->shader_slab_mutex);
	list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		uint64_t offset = 0;
		list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
			if (s->bo_offset - offset >= shader->code_size) {
				shader->bo = slab->bo;
				shader->bo_offset = offset;
				list_addtail(&shader->slab_list, &s->slab_list);
				mtx_unlock(&device->shader_slab_mutex);
				return slab->ptr + offset;
			}
			offset = align_u64(s->bo_offset + s->code_size, 256);
		}
		if (slab->size - offset >= shader->code_size) {
			shader->bo = slab->bo;
			shader->bo_offset = offset;
			list_addtail(&shader->slab_list, &slab->shaders);
			mtx_unlock(&device->shader_slab_mutex);
			return slab->ptr + offset;
		}
	}

	mtx_unlock(&device->shader_slab_mutex);
	struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

	slab->size = 256 * 1024;
	slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
					     RADEON_DOMAIN_VRAM,
					     RADEON_FLAG_NO_INTERPROCESS_SHARING |
					     (device->physical_device->cpdma_prefetch_writes_memory ?
					      0 : RADEON_FLAG_READ_ONLY),
					     RADV_BO_PRIORITY_SHADER);
	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
	list_inithead(&slab->shaders);

	mtx_lock(&device->shader_slab_mutex);
	list_add(&slab->slabs, &device->shader_slabs);

	shader->bo = slab->bo;
	shader->bo_offset = 0;
	list_add(&shader->slab_list, &slab->shaders);
	mtx_unlock(&device->shader_slab_mutex);
	return slab->ptr;
}

void
radv_destroy_shader_slabs(struct radv_device *device)
{
	list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
		device->ws->buffer_destroy(slab->bo);
		free(slab);
	}
	mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER	0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS		5

static unsigned
radv_get_shader_binary_size(struct ac_shader_binary *binary)
{
	return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
}

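/* Fill in the variant's RSRC1/RSRC2 hardware registers from the compiler
 * output, then upload the machine code into slab memory. The VGPR
 * component counts and LDS/scratch bits are per-stage and, on GFX9+, take
 * the merged shader stages (ES+GS, LS+HS) into account.
 */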
static void
radv_fill_shader_variant(struct radv_device *device,
			 struct radv_shader_variant *variant,
			 struct ac_shader_binary *binary,
			 gl_shader_stage stage)
{
	bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
	struct radv_shader_info *info = &variant->info.info;
	unsigned vgpr_comp_cnt = 0;

	variant->code_size = radv_get_shader_binary_size(binary);
	variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
			 S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
			 S_00B12C_SCRATCH_EN(scratch_enabled) |
			 S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
			 S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
			 S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
			 S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
			 S_00B12C_SO_EN(!!info->so.num_outputs);

	variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
			 S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
			 S_00B848_DX10_CLAMP(1) |
			 S_00B848_FLOAT_MODE(variant->config.float_mode);

	switch (stage) {
	case MESA_SHADER_TESS_EVAL:
		vgpr_comp_cnt = 3;
		variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		break;
	case MESA_SHADER_TESS_CTRL:
		if (device->physical_device->rad_info.chip_class >= GFX9) {
			vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		} else {
			variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
		}
		break;
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_GEOMETRY:
		vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		break;
	case MESA_SHADER_FRAGMENT:
		break;
	case MESA_SHADER_COMPUTE:
		variant->rsrc2 |=
			S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
			S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
			S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
			S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
						info->cs.uses_thread_id[1] ? 1 : 0) |
			S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
			S_00B84C_LDS_SIZE(variant->config.lds_size);
		break;
	default:
		unreachable("unsupported shader type");
		break;
	}

	if (device->physical_device->rad_info.chip_class >= GFX9 &&
	    stage == MESA_SHADER_GEOMETRY) {
		unsigned es_type = variant->info.gs.es_type;
		unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

		if (es_type == MESA_SHADER_VERTEX) {
			es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
		} else if (es_type == MESA_SHADER_TESS_EVAL) {
			es_vgpr_comp_cnt = 3;
		} else {
			unreachable("invalid shader ES type");
		}

		/* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
		 * VGPR[0:4] are always loaded.
		 */
		if (info->uses_invocation_id) {
			gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
		} else if (info->uses_prim_id) {
			gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
		} else if (variant->info.gs.vertices_in >= 3) {
			gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
		} else {
			gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
		}

		variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
		variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
				  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
	} else if (device->physical_device->rad_info.chip_class >= GFX9 &&
		   stage == MESA_SHADER_TESS_CTRL) {
		variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
	} else {
		variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
	}

	void *ptr = radv_alloc_shader_memory(device, variant);
	memcpy(ptr, binary->code, binary->code_size);

	/* Add end-of-code markers for the UMR disassembler. */
	uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
	for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
		ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
}

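/* One-time process-wide LLVM initialization; always reached through
 * radv_init_llvm_once() below so that concurrent pipeline compiles only
 * initialize the targets once.
 */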
static void radv_init_llvm_target(void)
{
	LLVMInitializeAMDGPUTargetInfo();
	LLVMInitializeAMDGPUTarget();
	LLVMInitializeAMDGPUTargetMC();
	LLVMInitializeAMDGPUAsmPrinter();

	/* For inline assembly. */
	LLVMInitializeAMDGPUAsmParser();

	/* Workaround for bug in llvm 4.0 that causes image intrinsics
	 * to disappear.
	 * https://reviews.llvm.org/D26348
	 *
	 * Workaround for bug in llvm that causes the GPU to hang in presence
	 * of nested loops because there is an exec mask issue. The proper
	 * solution is to fix LLVM but this might require a bunch of work.
	 * https://bugs.llvm.org/show_bug.cgi?id=37744
	 *
	 * "mesa" is the prefix for error messages.
	 */
	if (HAVE_LLVM >= 0x0800) {
		const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
		LLVMParseCommandLineOptions(2, argv, NULL);
	} else {
		const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
					"-amdgpu-skip-threshold=1" };
		LLVMParseCommandLineOptions(3, argv, NULL);
	}
}

static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;

static void radv_init_llvm_once(void)
{
	call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}

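/* Common back end for both regular shader variants and GS copy shaders:
 * set up the per-device compiler options, run the NIR -> LLVM -> ISA
 * compile, and wrap the resulting binary in a radv_shader_variant.
 */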
static struct radv_shader_variant *
shader_variant_create(struct radv_device *device,
		      struct radv_shader_module *module,
		      struct nir_shader * const *shaders,
		      int shader_count,
		      gl_shader_stage stage,
		      struct radv_nir_compiler_options *options,
		      bool gs_copy_shader,
		      void **code_out,
		      unsigned *code_size_out)
{
	enum radeon_family chip_family = device->physical_device->rad_info.family;
	enum ac_target_machine_options tm_options = 0;
	struct radv_shader_variant *variant;
	struct ac_shader_binary binary;
	struct ac_llvm_compiler ac_llvm;
	bool thread_compiler;

	variant = calloc(1, sizeof(struct radv_shader_variant));
	if (!variant)
		return NULL;

	options->family = chip_family;
	options->chip_class = device->physical_device->rad_info.chip_class;
	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
	options->dump_preoptir = options->dump_shader &&
				 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
	options->record_llvm_ir = device->keep_shader_info;
	options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
	options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
	options->address32_hi = device->physical_device->rad_info.address32_hi;

	if (options->supports_spill)
		tm_options |= AC_TM_SUPPORTS_SPILL;
	if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
		tm_options |= AC_TM_SISCHED;
	if (options->check_ir)
		tm_options |= AC_TM_CHECK_IR;
	if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
		tm_options |= AC_TM_NO_LOAD_STORE_OPT;

	thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
	radv_init_llvm_once();
	radv_init_llvm_compiler(&ac_llvm,
				thread_compiler,
				chip_family, tm_options);
	if (gs_copy_shader) {
		assert(shader_count == 1);
		radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
					    &variant->config, &variant->info,
					    options);
	} else {
		radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
					&variant->info, shaders, shader_count,
					options);
	}

	radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);

	radv_fill_shader_variant(device, variant, &binary, stage);

	if (code_out) {
		*code_out = binary.code;
		*code_size_out = binary.code_size;
	} else {
		free(binary.code);
	}
	free(binary.config);
	free(binary.rodata);
	free(binary.global_symbol_offsets);
	free(binary.relocs);
	variant->ref_count = 1;

	if (device->keep_shader_info) {
		variant->disasm_string = binary.disasm_string;
		variant->llvm_ir_string = binary.llvm_ir_string;
		if (!gs_copy_shader && !module->nir) {
			variant->nir = *shaders;
			variant->spirv = (uint32_t *)module->data;
			variant->spirv_size = module->size;
		}
	} else {
		free(binary.disasm_string);
	}

	return variant;
}

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
			   struct radv_shader_module *module,
			   struct nir_shader *const *shaders,
			   int shader_count,
			   struct radv_pipeline_layout *layout,
			   const struct radv_shader_variant_key *key,
			   void **code_out,
			   unsigned *code_size_out)
{
	struct radv_nir_compiler_options options = {0};

	options.layout = layout;
	if (key)
		options.key = *key;

	options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
	options.supports_spill = true;

	return shader_variant_create(device, module, shaders, shader_count,
				     shaders[shader_count - 1]->info.stage,
				     &options, false, code_out, code_size_out);
}

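/* The GS copy shader is an internally generated vertex shader that reads
 * the geometry shader's ring buffer output and performs the exports; it is
 * compiled with the same machinery as regular variants.
 */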
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
			   struct nir_shader *shader,
			   void **code_out,
			   unsigned *code_size_out,
			   bool multiview)
{
	struct radv_nir_compiler_options options = {0};

	options.key.has_multiview_view_index = multiview;

	return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
				     &options, true, code_out, code_size_out);
}

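/* Drop one reference; the variant's slab memory and metadata are only
 * freed when the last reference goes away.
 */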
void
radv_shader_variant_destroy(struct radv_device *device,
			    struct radv_shader_variant *variant)
{
	if (!p_atomic_dec_zero(&variant->ref_count))
		return;

	mtx_lock(&device->shader_slab_mutex);
	list_del(&variant->slab_list);
	mtx_unlock(&device->shader_slab_mutex);

	ralloc_free(variant->nir);
	free(variant->disasm_string);
	free(variant->llvm_ir_string);
	free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
		return var->info.vs.as_ls ? "Vertex Shader as LS" :
		       var->info.vs.as_es ? "Vertex Shader as ES" :
		       "Vertex Shader as VS";
	case MESA_SHADER_GEOMETRY:
		return "Geometry Shader";
	case MESA_SHADER_FRAGMENT:
		return "Pixel Shader";
	case MESA_SHADER_COMPUTE:
		return "Compute Shader";
	case MESA_SHADER_TESS_CTRL:
		return "Tessellation Control Shader";
	case MESA_SHADER_TESS_EVAL:
		return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" :
		       "Tessellation Evaluation Shader as VS";
	default:
		return "Unknown shader";
	}
}

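/* Print the shader config and an estimate of the maximum number of waves
 * per SIMD, which is bounded by whichever of SGPR, VGPR and LDS usage is
 * the scarcest resource for this variant.
 */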
static void
generate_shader_stats(struct radv_device *device,
		      struct radv_shader_variant *variant,
		      gl_shader_stage stage,
		      struct _mesa_string_buffer *buf)
{
	enum chip_class chip_class = device->physical_device->rad_info.chip_class;
	unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
	struct ac_shader_config *conf;
	unsigned max_simd_waves;
	unsigned lds_per_wave = 0;

	max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);

	conf = &variant->config;

	if (stage == MESA_SHADER_FRAGMENT) {
		lds_per_wave = conf->lds_size * lds_increment +
			       align(variant->info.fs.num_interp * 48,
				     lds_increment);
	} else if (stage == MESA_SHADER_COMPUTE) {
		unsigned max_workgroup_size =
			radv_nir_get_max_workgroup_size(chip_class, variant->nir);
		lds_per_wave = (conf->lds_size * lds_increment) /
			       DIV_ROUND_UP(max_workgroup_size, 64);
	}

	if (conf->num_sgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);

	if (conf->num_vgprs)
		max_simd_waves =
			MIN2(max_simd_waves,
			     RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

	/* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
	 * that PS can use.
	 */
	if (lds_per_wave)
		max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

	if (stage == MESA_SHADER_FRAGMENT) {
		_mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
					   "SPI_PS_INPUT_ADDR = 0x%04x\n"
					   "SPI_PS_INPUT_ENA = 0x%04x\n",
					   conf->spi_ps_input_addr, conf->spi_ps_input_ena);
	}

	_mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
				   "SGPRS: %d\n"
				   "VGPRS: %d\n"
				   "Spilled SGPRs: %d\n"
				   "Spilled VGPRs: %d\n"
				   "PrivMem VGPRS: %d\n"
				   "Code Size: %d bytes\n"
				   "LDS: %d blocks\n"
				   "Scratch: %d bytes per wave\n"
				   "Max Waves: %d\n"
				   "********************\n\n\n",
				   conf->num_sgprs, conf->num_vgprs,
				   conf->spilled_sgprs, conf->spilled_vgprs,
				   variant->info.private_mem_vgprs, variant->code_size,
				   conf->lds_size, conf->scratch_bytes_per_wave,
				   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
		       struct radv_shader_variant *variant,
		       gl_shader_stage stage,
		       FILE *file)
{
	struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

	generate_shader_stats(device, variant, stage, buf);

	fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
	fprintf(file, "%s", buf->buf);

	_mesa_string_buffer_destroy(buf);
}

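/* Implementation of vkGetShaderInfoAMD() from VK_AMD_shader_info. Follows
 * the usual Vulkan two-call idiom: when pInfo is NULL only the required
 * size is written to pInfoSize, otherwise up to *pInfoSize bytes are copied
 * and VK_INCOMPLETE is returned if the buffer was too small. A minimal
 * app-side sketch (names are illustrative):
 *
 *    size_t size;
 *    vkGetShaderInfoAMD(dev, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, NULL);
 *    char *text = malloc(size);
 *    vkGetShaderInfoAMD(dev, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, text);
 */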
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
		      VkPipeline _pipeline,
		      VkShaderStageFlagBits shaderStage,
		      VkShaderInfoTypeAMD infoType,
		      size_t* pInfoSize,
		      void* pInfo)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
	gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
	struct radv_shader_variant *variant = pipeline->shaders[stage];
	struct _mesa_string_buffer *buf;
	VkResult result = VK_SUCCESS;

	/* Spec doesn't indicate what to do if the stage is invalid, so just
	 * return no info for this.
	 */
	if (!variant)
		return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

	switch (infoType) {
	case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
		if (!pInfo) {
			*pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
		} else {
			unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
			struct ac_shader_config *conf = &variant->config;

			VkShaderStatisticsInfoAMD statistics = {};
			statistics.shaderStageMask = shaderStage;
			statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
			statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
			statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

			if (stage == MESA_SHADER_COMPUTE) {
				unsigned *local_size = variant->nir->info.cs.local_size;
				unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

				statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
							       ceil((double)workgroup_size / statistics.numPhysicalVgprs);

				statistics.computeWorkGroupSize[0] = local_size[0];
				statistics.computeWorkGroupSize[1] = local_size[1];
				statistics.computeWorkGroupSize[2] = local_size[2];
			} else {
				statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
			}

			statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
			statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
			statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
			statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
			statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

			size_t size = *pInfoSize;
			*pInfoSize = sizeof(statistics);

			memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

			if (size < *pInfoSize)
				result = VK_INCOMPLETE;
		}
		break;
	case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD: {
		buf = _mesa_string_buffer_create(NULL, 1024);

		_mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
		_mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
		generate_shader_stats(device, variant, stage, buf);

		/* Need to include the null terminator. */
		size_t length = buf->length + 1;

		if (!pInfo) {
			*pInfoSize = length;
		} else {
			size_t size = *pInfoSize;
			*pInfoSize = length;

			memcpy(pInfo, buf->buf, MIN2(size, length));

			if (size < length)
				result = VK_INCOMPLETE;
		}

		_mesa_string_buffer_destroy(buf);
		break;
	}
	default:
		/* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
		result = VK_ERROR_FEATURE_NOT_PRESENT;
		break;
	}

	return result;
}
}