ac: add ac_build_{struct,raw}_tbuffer_load() helpers
[mesa.git] src/amd/vulkan/radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include <llvm-c/Core.h>
#include <llvm-c/TargetMachine.h>
#include <llvm-c/Support.h>

#include "sid.h"
#include "gfx9d.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "util/string_buffer.h"

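/* NIR compiler options shared by every RADV stage. Each lower_* flag asks
 * NIR to rewrite an operation before it reaches the AMD backend, e.g. the
 * pack/unpack and extract ops become shifts and masks, and fpow becomes an
 * exp2/log2 sequence.
 */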
static const struct nir_shader_compiler_options nir_options = {
        .vertex_id_zero_based = true,
        .lower_scmp = true,
        .lower_flrp32 = true,
        .lower_flrp64 = true,
        .lower_device_index_to_zero = true,
        .lower_fsat = true,
        .lower_fdiv = true,
        .lower_sub = true,
        .lower_pack_snorm_2x16 = true,
        .lower_pack_snorm_4x8 = true,
        .lower_pack_unorm_2x16 = true,
        .lower_pack_unorm_4x8 = true,
        .lower_unpack_snorm_2x16 = true,
        .lower_unpack_snorm_4x8 = true,
        .lower_unpack_unorm_2x16 = true,
        .lower_unpack_unorm_4x8 = true,
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_ffma = true,
        .lower_fpow = true,
        .lower_mul_2x32_64 = true,
        .max_unroll_iterations = 32
};

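/* vkCreateShaderModule: store the raw SPIR-V words (internal meta shaders
 * instead provide NIR through module->nir) and compute a SHA-1 of the code;
 * the hash later goes into pipeline cache keys, so identical modules only
 * compile once.
 */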
VkResult radv_CreateShaderModule(
        VkDevice                                    _device,
        const VkShaderModuleCreateInfo*             pCreateInfo,
        const VkAllocationCallbacks*                pAllocator,
        VkShaderModule*                             pShaderModule)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        struct radv_shader_module *module;

        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
        assert(pCreateInfo->flags == 0);

        module = vk_alloc2(&device->alloc, pAllocator,
                           sizeof(*module) + pCreateInfo->codeSize, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (module == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

        module->nir = NULL;
        module->size = pCreateInfo->codeSize;
        memcpy(module->data, pCreateInfo->pCode, module->size);

        _mesa_sha1_compute(module->data, module->size, module->sha1);

        *pShaderModule = radv_shader_module_to_handle(module);

        return VK_SUCCESS;
}

void radv_DestroyShaderModule(
        VkDevice                                    _device,
        VkShaderModule                              _module,
        const VkAllocationCallbacks*                pAllocator)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_shader_module, module, _module);

        if (!module)
                return;

        vk_free2(&device->alloc, pAllocator, module);
}

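/* Run the NIR optimization loop until it reaches a fixed point: every
 * NIR_PASS reports whether it made progress, and the loop repeats while any
 * pass did. When optimize_conservatively is set (the app requested
 * VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT) the body runs only once.
 */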
void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                  bool allow_copies)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
                NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

                NIR_PASS_V(shader, nir_lower_vars_to_ssa);
                NIR_PASS_V(shader, nir_lower_pack);

                if (allow_copies) {
                        /* Only run this pass in the first call to
                         * radv_optimize_nir. Later calls assume that we've
                         * lowered away any copy_deref instructions and we
                         * don't want to introduce any more.
                         */
                        NIR_PASS(progress, shader, nir_opt_find_array_copies);
                }

                NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
                NIR_PASS(progress, shader, nir_opt_dead_write_vars);

                NIR_PASS_V(shader, nir_lower_alu_to_scalar);
                NIR_PASS_V(shader, nir_lower_phis_to_scalar);

                NIR_PASS(progress, shader, nir_copy_prop);
                NIR_PASS(progress, shader, nir_opt_remove_phis);
                NIR_PASS(progress, shader, nir_opt_dce);
                if (nir_opt_trivial_continues(shader)) {
                        progress = true;
                        NIR_PASS(progress, shader, nir_copy_prop);
                        NIR_PASS(progress, shader, nir_opt_remove_phis);
                        NIR_PASS(progress, shader, nir_opt_dce);
                }
                NIR_PASS(progress, shader, nir_opt_if);
                NIR_PASS(progress, shader, nir_opt_dead_cf);
                NIR_PASS(progress, shader, nir_opt_cse);
                NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
                NIR_PASS(progress, shader, nir_opt_algebraic);
                NIR_PASS(progress, shader, nir_opt_constant_folding);
                NIR_PASS(progress, shader, nir_opt_undef);
                NIR_PASS(progress, shader, nir_opt_conditional_discard);
                if (shader->options->max_unroll_iterations) {
                        NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
                }
        } while (progress && !optimize_conservatively);

        NIR_PASS(progress, shader, nir_opt_shrink_load);
        NIR_PASS(progress, shader, nir_opt_move_load_ubo);
}

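/* Build a NIR shader from a shader module. Internal meta shaders already
 * carry NIR and are used as-is; everything else goes through spirv_to_nir
 * with the specialization constants resolved first. The result is then
 * inlined into a single "main" entrypoint, lowered (I/O, subgroups, system
 * values) and run through the optimization loop above.
 */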
nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
                           const char *entrypoint_name,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info,
                           const VkPipelineCreateFlags flags)
{
        nir_shader *nir;
        nir_function *entry_point;
        if (module->nir) {
                /* Some things such as our meta clear/blit code will give us a
                 * NIR shader directly. In that case, we ignore the SPIR-V
                 * entirely and use the NIR shader as-is. */
                nir = module->nir;
                nir->options = &nir_options;
                nir_validate_shader(nir, "in internal shader");

                assert(exec_list_length(&nir->functions) == 1);
                struct exec_node *node = exec_list_get_head(&nir->functions);
                entry_point = exec_node_data(nir_function, node, node);
        } else {
                uint32_t *spirv = (uint32_t *) module->data;
                assert(module->size % 4 == 0);

                if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
                        radv_print_spirv(spirv, module->size, stderr);

                uint32_t num_spec_entries = 0;
                struct nir_spirv_specialization *spec_entries = NULL;
                if (spec_info && spec_info->mapEntryCount > 0) {
                        num_spec_entries = spec_info->mapEntryCount;
                        spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
                        for (uint32_t i = 0; i < num_spec_entries; i++) {
                                VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
                                const void *data = spec_info->pData + entry.offset;
                                assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

                                spec_entries[i].id = spec_info->pMapEntries[i].constantID;
                                if (entry.size == 8)
                                        spec_entries[i].data64 = *(const uint64_t *)data;
                                else
                                        spec_entries[i].data32 = *(const uint32_t *)data;
                        }
                }
                const struct spirv_to_nir_options spirv_options = {
                        .lower_ubo_ssbo_access_to_offsets = true,
                        .caps = {
                                .descriptor_array_dynamic_indexing = true,
                                .device_group = true,
                                .draw_parameters = true,
                                .float64 = true,
                                .gcn_shader = true,
                                .geometry_streams = true,
                                .image_read_without_format = true,
                                .image_write_without_format = true,
                                .int16 = true,
                                .int64 = true,
                                .multiview = true,
                                .physical_storage_buffer_address = true,
                                .runtime_descriptor_array = true,
                                .shader_viewport_index_layer = true,
                                .stencil_export = true,
                                .storage_16bit = true,
                                .storage_image_ms = true,
                                .subgroup_arithmetic = true,
                                .subgroup_ballot = true,
                                .subgroup_basic = true,
                                .subgroup_quad = true,
                                .subgroup_shuffle = true,
                                .subgroup_vote = true,
                                .tessellation = true,
                                .transform_feedback = true,
                                .trinary_minmax = true,
                                .variable_pointers = true,
                        },
                        .ubo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
                        .ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT, 2),
                        .phys_ssbo_ptr_type = glsl_vector_type(GLSL_TYPE_UINT64, 1),
                        .push_const_ptr_type = glsl_uint_type(),
                        .shared_ptr_type = glsl_uint_type(),
                };
                entry_point = spirv_to_nir(spirv, module->size / 4,
                                           spec_entries, num_spec_entries,
                                           stage, entrypoint_name,
                                           &spirv_options, &nir_options);
                nir = entry_point->shader;
                assert(nir->info.stage == stage);
                nir_validate_shader(nir, "after spirv_to_nir");

                free(spec_entries);

                /* We have to lower away local constant initializers right before we
                 * inline functions. That way they get properly initialized at the top
                 * of the function and not at the top of its caller.
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
                NIR_PASS_V(nir, nir_lower_returns);
                NIR_PASS_V(nir, nir_inline_functions);
                NIR_PASS_V(nir, nir_opt_deref);

                /* Pick off the single entrypoint that we want */
                foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
                        if (func != entry_point)
                                exec_node_remove(&func->node);
                }
                assert(exec_list_length(&nir->functions) == 1);
                entry_point->name = ralloc_strdup(entry_point, "main");

                /* Make sure we lower constant initializers on output variables so that
                 * nir_remove_dead_variables below sees the corresponding stores
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);

                /* Now that we've deleted all but the main function, we can go ahead and
                 * lower the rest of the constant initializers.
                 */
                NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

                /* Split member structs. We do this before lower_io_to_temporaries so that
                 * it doesn't lower system values to temporaries by accident.
                 */
                NIR_PASS_V(nir, nir_split_var_copies);
                NIR_PASS_V(nir, nir_split_per_member_structs);

                NIR_PASS_V(nir, nir_remove_dead_variables,
                           nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

                NIR_PASS_V(nir, nir_lower_system_values);
                NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
        }

        /* Vulkan uses the separate-shader linking model */
        nir->info.separate_shader = true;

        nir_shader_gather_info(nir, entry_point->impl);

        static const nir_lower_tex_options tex_options = {
                .lower_txp = ~0,
        };

        nir_lower_tex(nir, &tex_options);

        nir_lower_vars_to_ssa(nir);

        if (nir->info.stage == MESA_SHADER_VERTEX ||
            nir->info.stage == MESA_SHADER_GEOMETRY) {
                NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                           nir_shader_get_entrypoint(nir), true, true);
        } else if (nir->info.stage == MESA_SHADER_TESS_EVAL ||
                   nir->info.stage == MESA_SHADER_FRAGMENT) {
                NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                           nir_shader_get_entrypoint(nir), true, false);
        }

        nir_split_var_copies(nir);

        nir_lower_global_vars_to_local(nir);
        nir_remove_dead_variables(nir, nir_var_function_temp);
        nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
                        .subgroup_size = 64,
                        .ballot_bit_size = 64,
                        .lower_to_scalar = 1,
                        .lower_subgroup_masks = 1,
                        .lower_shuffle = 1,
                        .lower_shuffle_to_32bit = 1,
                        .lower_vote_eq_to_ballot = 1,
                });

        nir_lower_load_const_to_scalar(nir);

        if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
                radv_optimize_nir(nir, false, true);

        /* We call nir_lower_var_copies() after the first radv_optimize_nir()
         * to remove any copies introduced by nir_opt_find_array_copies().
         */
        nir_lower_var_copies(nir);

        /* Indirect lowering must be called after the radv_optimize_nir() loop
         * has been called at least once. Otherwise indirect lowering can
         * bloat the instruction count of the loop and cause it to be
         * considered too large for unrolling.
         */
        ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
        radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

        return nir;
}

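/* Upload space for shader code comes from a list of 256 KiB device-local
 * slabs. Allocation is first-fit: walk each slab's shader list looking for a
 * 256-byte-aligned gap large enough for the new code, and if no slab has
 * room, map a fresh one. The slab mutex protects both the slab list and the
 * per-slab shader lists.
 */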
void *
radv_alloc_shader_memory(struct radv_device *device,
                         struct radv_shader_variant *shader)
{
        mtx_lock(&device->shader_slab_mutex);
        list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
                uint64_t offset = 0;
                list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
                        if (s->bo_offset - offset >= shader->code_size) {
                                shader->bo = slab->bo;
                                shader->bo_offset = offset;
                                list_addtail(&shader->slab_list, &s->slab_list);
                                mtx_unlock(&device->shader_slab_mutex);
                                return slab->ptr + offset;
                        }
                        offset = align_u64(s->bo_offset + s->code_size, 256);
                }
                if (slab->size - offset >= shader->code_size) {
                        shader->bo = slab->bo;
                        shader->bo_offset = offset;
                        list_addtail(&shader->slab_list, &slab->shaders);
                        mtx_unlock(&device->shader_slab_mutex);
                        return slab->ptr + offset;
                }
        }

        mtx_unlock(&device->shader_slab_mutex);
        struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

        slab->size = 256 * 1024;
        slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
                                             RADEON_DOMAIN_VRAM,
                                             RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                             (device->physical_device->cpdma_prefetch_writes_memory ?
                                              0 : RADEON_FLAG_READ_ONLY),
                                             RADV_BO_PRIORITY_SHADER);
        slab->ptr = (char*)device->ws->buffer_map(slab->bo);
        list_inithead(&slab->shaders);

        mtx_lock(&device->shader_slab_mutex);
        list_add(&slab->slabs, &device->shader_slabs);

        shader->bo = slab->bo;
        shader->bo_offset = 0;
        list_add(&shader->slab_list, &slab->shaders);
        mtx_unlock(&device->shader_slab_mutex);
        return slab->ptr;
}

void
radv_destroy_shader_slabs(struct radv_device *device)
{
        list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
                device->ws->buffer_destroy(slab->bo);
                free(slab);
        }
        mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER    0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS           5

static unsigned
radv_get_shader_binary_size(struct ac_shader_binary *binary)
{
        return binary->code_size + DEBUGGER_NUM_MARKERS * 4;
}

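/* Translate the compiler output into the packed SPI_SHADER_PGM_RSRC1/RSRC2
 * register values and upload the machine code. Note the register encoding:
 * VGPRS is in granules of 4 registers and SGPRS in granules of 8, hence the
 * (n - 1) / 4 and (n - 1) / 8 below. The *_VGPR_COMP_CNT fields tell the
 * hardware how many input VGPRs to preload for the stage (vertex id,
 * primitive id, tess coords, etc.).
 */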
static void
radv_fill_shader_variant(struct radv_device *device,
                         struct radv_shader_variant *variant,
                         struct ac_shader_binary *binary,
                         gl_shader_stage stage)
{
        bool scratch_enabled = variant->config.scratch_bytes_per_wave > 0;
        struct radv_shader_info *info = &variant->info.info;
        unsigned vgpr_comp_cnt = 0;

        variant->code_size = radv_get_shader_binary_size(binary);
        variant->rsrc2 = S_00B12C_USER_SGPR(variant->info.num_user_sgprs) |
                         S_00B12C_USER_SGPR_MSB(variant->info.num_user_sgprs >> 5) |
                         S_00B12C_SCRATCH_EN(scratch_enabled) |
                         S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
                         S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
                         S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
                         S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
                         S_00B12C_SO_EN(!!info->so.num_outputs);

        variant->rsrc1 = S_00B848_VGPRS((variant->config.num_vgprs - 1) / 4) |
                         S_00B848_SGPRS((variant->config.num_sgprs - 1) / 8) |
                         S_00B848_DX10_CLAMP(1) |
                         S_00B848_FLOAT_MODE(variant->config.float_mode);

        switch (stage) {
        case MESA_SHADER_TESS_EVAL:
                vgpr_comp_cnt = 3;
                variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
                break;
        case MESA_SHADER_TESS_CTRL:
                if (device->physical_device->rad_info.chip_class >= GFX9) {
                        vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                } else {
                        variant->rsrc2 |= S_00B12C_OC_LDS_EN(1);
                }
                break;
        case MESA_SHADER_VERTEX:
        case MESA_SHADER_GEOMETRY:
                vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                break;
        case MESA_SHADER_FRAGMENT:
                break;
        case MESA_SHADER_COMPUTE:
                variant->rsrc2 |=
                        S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
                        S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
                        S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
                        S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
                                                info->cs.uses_thread_id[1] ? 1 : 0) |
                        S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
                        S_00B84C_LDS_SIZE(variant->config.lds_size);
                break;
        default:
                unreachable("unsupported shader type");
                break;
        }

        if (device->physical_device->rad_info.chip_class >= GFX9 &&
            stage == MESA_SHADER_GEOMETRY) {
                unsigned es_type = variant->info.gs.es_type;
                unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

                if (es_type == MESA_SHADER_VERTEX) {
                        es_vgpr_comp_cnt = variant->info.vs.vgpr_comp_cnt;
                } else if (es_type == MESA_SHADER_TESS_EVAL) {
                        es_vgpr_comp_cnt = 3;
                } else {
                        unreachable("invalid shader ES type");
                }

                /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
                 * VGPR[0:4] are always loaded.
                 */
                if (info->uses_invocation_id) {
                        gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
                } else if (info->uses_prim_id) {
                        gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
                } else if (variant->info.gs.vertices_in >= 3) {
                        gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
                } else {
                        gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
                }

                variant->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
                variant->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                                  S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
        } else if (device->physical_device->rad_info.chip_class >= GFX9 &&
                   stage == MESA_SHADER_TESS_CTRL) {
                variant->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
        } else {
                variant->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
        }

        void *ptr = radv_alloc_shader_memory(device, variant);
        memcpy(ptr, binary->code, binary->code_size);

        /* Add end-of-code markers for the UMR disassembler. */
        uint32_t *ptr32 = (uint32_t *)ptr + binary->code_size / 4;
        for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
                ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
}

static void radv_init_llvm_target(void)
{
        LLVMInitializeAMDGPUTargetInfo();
        LLVMInitializeAMDGPUTarget();
        LLVMInitializeAMDGPUTargetMC();
        LLVMInitializeAMDGPUAsmPrinter();

        /* For inline assembly. */
        LLVMInitializeAMDGPUAsmParser();

        /* Two command-line workarounds are passed to LLVM here:
         *
         * -simplifycfg-sink-common=false works around a bug in LLVM 4.0 that
         * causes image intrinsics to disappear.
         * https://reviews.llvm.org/D26348
         *
         * -amdgpu-skip-threshold=1 (LLVM < 8 only) works around a bug that
         * causes the GPU to hang in the presence of nested loops because of
         * an exec mask issue. The proper solution is to fix LLVM, but this
         * might require a bunch of work.
         * https://bugs.llvm.org/show_bug.cgi?id=37744
         *
         * "mesa" is the prefix for error messages.
         */
        if (HAVE_LLVM >= 0x0800) {
                const char *argv[2] = { "mesa", "-simplifycfg-sink-common=false" };
                LLVMParseCommandLineOptions(2, argv, NULL);
        } else {
                const char *argv[3] = { "mesa", "-simplifycfg-sink-common=false",
                                        "-amdgpu-skip-threshold=1" };
                LLVMParseCommandLineOptions(3, argv, NULL);
        }
}

static once_flag radv_init_llvm_target_once_flag = ONCE_FLAG_INIT;

static void radv_init_llvm_once(void)
{
        call_once(&radv_init_llvm_target_once_flag, radv_init_llvm_target);
}

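/* Common backend entry point for both regular shaders and GS copy shaders:
 * fill in the per-device compiler options, spin up the (optionally threaded)
 * LLVM compiler, run the NIR-to-LLVM backend, then pack the resulting binary
 * into a radv_shader_variant. Ownership of binary.code passes to the caller
 * via code_out when requested; the disassembly and LLVM IR strings are kept
 * on the variant only when the device records shader info.
 */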
static struct radv_shader_variant *
shader_variant_create(struct radv_device *device,
                      struct radv_shader_module *module,
                      struct nir_shader * const *shaders,
                      int shader_count,
                      gl_shader_stage stage,
                      struct radv_nir_compiler_options *options,
                      bool gs_copy_shader,
                      void **code_out,
                      unsigned *code_size_out)
{
        enum radeon_family chip_family = device->physical_device->rad_info.family;
        enum ac_target_machine_options tm_options = 0;
        struct radv_shader_variant *variant;
        struct ac_shader_binary binary;
        struct ac_llvm_compiler ac_llvm;
        bool thread_compiler;

        variant = calloc(1, sizeof(struct radv_shader_variant));
        if (!variant)
                return NULL;

        options->family = chip_family;
        options->chip_class = device->physical_device->rad_info.chip_class;
        options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
        options->dump_preoptir = options->dump_shader &&
                                 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
        options->record_llvm_ir = device->keep_shader_info;
        options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
        options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
        options->address32_hi = device->physical_device->rad_info.address32_hi;

        if (options->supports_spill)
                tm_options |= AC_TM_SUPPORTS_SPILL;
        if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
                tm_options |= AC_TM_SISCHED;
        if (options->check_ir)
                tm_options |= AC_TM_CHECK_IR;

        thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
        radv_init_llvm_once();
        radv_init_llvm_compiler(&ac_llvm,
                                thread_compiler,
                                chip_family, tm_options);
        if (gs_copy_shader) {
                assert(shader_count == 1);
                radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
                                            &variant->config, &variant->info,
                                            options);
        } else {
                radv_compile_nir_shader(&ac_llvm, &binary, &variant->config,
                                        &variant->info, shaders, shader_count,
                                        options);
        }

        radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);

        radv_fill_shader_variant(device, variant, &binary, stage);

        if (code_out) {
                *code_out = binary.code;
                *code_size_out = binary.code_size;
        } else
                free(binary.code);
        free(binary.config);
        free(binary.rodata);
        free(binary.global_symbol_offsets);
        free(binary.relocs);
        variant->ref_count = 1;

        if (device->keep_shader_info) {
                variant->disasm_string = binary.disasm_string;
                variant->llvm_ir_string = binary.llvm_ir_string;
                if (!gs_copy_shader && !module->nir) {
                        variant->nir = *shaders;
                        variant->spirv = (uint32_t *)module->data;
                        variant->spirv_size = module->size;
                }
        } else {
                free(binary.disasm_string);
        }

        return variant;
}

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
                           struct radv_shader_module *module,
                           struct nir_shader *const *shaders,
                           int shader_count,
                           struct radv_pipeline_layout *layout,
                           const struct radv_shader_variant_key *key,
                           void **code_out,
                           unsigned *code_size_out)
{
        struct radv_nir_compiler_options options = {0};

        options.layout = layout;
        if (key)
                options.key = *key;

        options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
        options.supports_spill = true;

        return shader_variant_create(device, module, shaders, shader_count,
                                     shaders[shader_count - 1]->info.stage,
                                     &options, false, code_out, code_size_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
                           struct nir_shader *shader,
                           void **code_out,
                           unsigned *code_size_out,
                           bool multiview)
{
        struct radv_nir_compiler_options options = {0};

        options.key.has_multiview_view_index = multiview;

        return shader_variant_create(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
                                     &options, true, code_out, code_size_out);
}

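/* Variants are reference-counted (the pipeline cache can hand the same
 * variant to several pipelines), so destruction only tears the variant down
 * and releases its spot in the shader slab once the last reference is gone.
 */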
void
radv_shader_variant_destroy(struct radv_device *device,
                            struct radv_shader_variant *variant)
{
        if (!p_atomic_dec_zero(&variant->ref_count))
                return;

        mtx_lock(&device->shader_slab_mutex);
        list_del(&variant->slab_list);
        mtx_unlock(&device->shader_slab_mutex);

        ralloc_free(variant->nir);
        free(variant->disasm_string);
        free(variant->llvm_ir_string);
        free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_variant *var, gl_shader_stage stage)
{
        switch (stage) {
        case MESA_SHADER_VERTEX:
                return var->info.vs.as_ls ? "Vertex Shader as LS" :
                       var->info.vs.as_es ? "Vertex Shader as ES" :
                       "Vertex Shader as VS";
        case MESA_SHADER_GEOMETRY: return "Geometry Shader";
        case MESA_SHADER_FRAGMENT: return "Pixel Shader";
        case MESA_SHADER_COMPUTE: return "Compute Shader";
        case MESA_SHADER_TESS_CTRL: return "Tessellation Control Shader";
        case MESA_SHADER_TESS_EVAL:
                return var->info.tes.as_es ? "Tessellation Evaluation Shader as ES" :
                       "Tessellation Evaluation Shader as VS";
        default:
                return "Unknown shader";
        }
}

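/* Estimate how many waves of this shader can be resident on a SIMD at once.
 * Occupancy is the minimum of what the SGPR budget, the VGPR budget and (for
 * fragment and compute shaders) the LDS budget each allow.
 */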
static void
generate_shader_stats(struct radv_device *device,
                      struct radv_shader_variant *variant,
                      gl_shader_stage stage,
                      struct _mesa_string_buffer *buf)
{
        enum chip_class chip_class = device->physical_device->rad_info.chip_class;
        unsigned lds_increment = chip_class >= CIK ? 512 : 256;
        struct ac_shader_config *conf;
        unsigned max_simd_waves;
        unsigned lds_per_wave = 0;

        max_simd_waves = ac_get_max_simd_waves(device->physical_device->rad_info.family);

        conf = &variant->config;

        if (stage == MESA_SHADER_FRAGMENT) {
                lds_per_wave = conf->lds_size * lds_increment +
                               align(variant->info.fs.num_interp * 48,
                                     lds_increment);
        } else if (stage == MESA_SHADER_COMPUTE) {
                unsigned max_workgroup_size =
                        radv_nir_get_max_workgroup_size(chip_class, variant->nir);
                lds_per_wave = (conf->lds_size * lds_increment) /
                               DIV_ROUND_UP(max_workgroup_size, 64);
        }

        if (conf->num_sgprs)
                max_simd_waves =
                        MIN2(max_simd_waves,
                             ac_get_num_physical_sgprs(chip_class) / conf->num_sgprs);

        if (conf->num_vgprs)
                max_simd_waves =
                        MIN2(max_simd_waves,
                             RADV_NUM_PHYSICAL_VGPRS / conf->num_vgprs);

        /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
         * that PS can use.
         */
        if (lds_per_wave)
                max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);

        if (stage == MESA_SHADER_FRAGMENT) {
                _mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
                                           "SPI_PS_INPUT_ADDR = 0x%04x\n"
                                           "SPI_PS_INPUT_ENA = 0x%04x\n",
                                           conf->spi_ps_input_addr, conf->spi_ps_input_ena);
        }

        _mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
                                   "SGPRS: %d\n"
                                   "VGPRS: %d\n"
                                   "Spilled SGPRs: %d\n"
                                   "Spilled VGPRs: %d\n"
                                   "PrivMem VGPRS: %d\n"
                                   "Code Size: %d bytes\n"
                                   "LDS: %d blocks\n"
                                   "Scratch: %d bytes per wave\n"
                                   "Max Waves: %d\n"
                                   "********************\n\n\n",
                                   conf->num_sgprs, conf->num_vgprs,
                                   conf->spilled_sgprs, conf->spilled_vgprs,
                                   variant->info.private_mem_vgprs, variant->code_size,
                                   conf->lds_size, conf->scratch_bytes_per_wave,
                                   max_simd_waves);
}

void
radv_shader_dump_stats(struct radv_device *device,
                       struct radv_shader_variant *variant,
                       gl_shader_stage stage,
                       FILE *file)
{
        struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

        generate_shader_stats(device, variant, stage, buf);

        fprintf(file, "\n%s:\n", radv_get_shader_name(variant, stage));
        fprintf(file, "%s", buf->buf);

        _mesa_string_buffer_destroy(buf);
}

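/* Implements VK_AMD_shader_info. Like other Vulkan getters this follows the
 * two-call idiom: when pInfo is NULL only the required size is written to
 * *pInfoSize; otherwise up to *pInfoSize bytes are copied and VK_INCOMPLETE
 * is returned if the buffer was too small. A caller would do (sketch):
 *
 *   size_t size;
 *   vkGetShaderInfoAMD(dev, pipe, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                      VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, NULL);
 *   char *disasm = malloc(size);
 *   vkGetShaderInfoAMD(dev, pipe, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                      VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size, disasm);
 */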
VkResult
radv_GetShaderInfoAMD(VkDevice _device,
                      VkPipeline _pipeline,
                      VkShaderStageFlagBits shaderStage,
                      VkShaderInfoTypeAMD infoType,
                      size_t* pInfoSize,
                      void* pInfo)
{
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
        gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
        struct radv_shader_variant *variant = pipeline->shaders[stage];
        struct _mesa_string_buffer *buf;
        VkResult result = VK_SUCCESS;

        /* The spec doesn't indicate what to do if there is no shader for the
         * requested stage, so return an error in that case. */
        if (!variant)
                return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

        switch (infoType) {
        case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
                if (!pInfo) {
                        *pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
                } else {
                        unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= CIK ? 512 : 256;
                        struct ac_shader_config *conf = &variant->config;

                        VkShaderStatisticsInfoAMD statistics = {};
                        statistics.shaderStageMask = shaderStage;
                        statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
                        statistics.numPhysicalSgprs = ac_get_num_physical_sgprs(device->physical_device->rad_info.chip_class);
                        statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

                        if (stage == MESA_SHADER_COMPUTE) {
                                unsigned *local_size = variant->nir->info.cs.local_size;
                                unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

                                statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
                                                               ceil((double)workgroup_size / statistics.numPhysicalVgprs);

                                statistics.computeWorkGroupSize[0] = local_size[0];
                                statistics.computeWorkGroupSize[1] = local_size[1];
                                statistics.computeWorkGroupSize[2] = local_size[2];
                        } else {
                                statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
                        }

                        statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
                        statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
                        statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
                        statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
                        statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

                        size_t size = *pInfoSize;
                        *pInfoSize = sizeof(statistics);

                        memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

                        if (size < *pInfoSize)
                                result = VK_INCOMPLETE;
                }
                break;
        case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD: {
                buf = _mesa_string_buffer_create(NULL, 1024);

                _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(variant, stage));
                _mesa_string_buffer_printf(buf, "%s\n\n", variant->llvm_ir_string);
                _mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
                generate_shader_stats(device, variant, stage, buf);

                /* Need to include the null terminator. */
                size_t length = buf->length + 1;

                if (!pInfo) {
                        *pInfoSize = length;
                } else {
                        size_t size = *pInfoSize;
                        *pInfoSize = length;

                        memcpy(pInfo, buf->buf, MIN2(size, length));

                        if (size < length)
                                result = VK_INCOMPLETE;
                }

                _mesa_string_buffer_destroy(buf);
                break;
        }
        default:
                /* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
                result = VK_ERROR_FEATURE_NOT_PRESENT;
                break;
        }

        return result;
}