radv,aco: report ACO errors/warnings back via VK_EXT_debug_report
src/amd/vulkan/radv_shader.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "util/u_atomic.h"
#include "radv_debug.h"
#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "radv_shader_args.h"
#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "spirv/nir_spirv.h"

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "vk_format.h"
#include "util/debug.h"
#include "ac_exp_param.h"

#include "aco_interface.h"

#include "util/string_buffer.h"

static const struct nir_shader_compiler_options nir_options_llvm = {
   .vertex_id_zero_based = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_device_index_to_zero = true,
   .lower_fsat = true,
   .lower_fdiv = true,
   .lower_fmod = true,
   .lower_bitfield_insert_to_bitfield_select = true,
   .lower_bitfield_extract = true,
   .lower_sub = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_ffma = true,
   .lower_fpow = true,
   .lower_mul_2x32_64 = true,
   .lower_rotate = true,
   .use_scoped_barrier = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   /* nir_lower_int64() isn't actually called for the LLVM backend, but
    * this helps the loop unrolling heuristics. */
   .lower_int64_options = nir_lower_imul64 |
                          nir_lower_imul_high64 |
                          nir_lower_imul_2x32_64 |
                          nir_lower_divmod64 |
                          nir_lower_minmax64 |
                          nir_lower_iabs64,
   .lower_doubles_options = nir_lower_drcp |
                            nir_lower_dsqrt |
                            nir_lower_drsq |
                            nir_lower_ddiv,
};

static const struct nir_shader_compiler_options nir_options_aco = {
   .vertex_id_zero_based = true,
   .lower_scmp = true,
   .lower_flrp16 = true,
   .lower_flrp32 = true,
   .lower_flrp64 = true,
   .lower_device_index_to_zero = true,
   .lower_fdiv = true,
   .lower_fmod = true,
   .lower_bitfield_insert_to_bitfield_select = true,
   .lower_bitfield_extract = true,
   .lower_pack_snorm_2x16 = true,
   .lower_pack_snorm_4x8 = true,
   .lower_pack_unorm_2x16 = true,
   .lower_pack_unorm_4x8 = true,
   .lower_unpack_snorm_2x16 = true,
   .lower_unpack_snorm_4x8 = true,
   .lower_unpack_unorm_2x16 = true,
   .lower_unpack_unorm_4x8 = true,
   .lower_unpack_half_2x16 = true,
   .lower_extract_byte = true,
   .lower_extract_word = true,
   .lower_ffma = true,
   .lower_fpow = true,
   .lower_mul_2x32_64 = true,
   .lower_rotate = true,
   .use_scoped_barrier = true,
   .max_unroll_iterations = 32,
   .use_interpolated_input_intrinsics = true,
   .lower_int64_options = nir_lower_imul64 |
                          nir_lower_imul_high64 |
                          nir_lower_imul_2x32_64 |
                          nir_lower_divmod64 |
                          nir_lower_minmax64 |
                          nir_lower_iabs64,
   .lower_doubles_options = nir_lower_drcp |
                            nir_lower_dsqrt |
                            nir_lower_drsq |
                            nir_lower_ddiv,
};

bool
radv_can_dump_shader(struct radv_device *device,
                     struct radv_shader_module *module,
                     bool is_gs_copy_shader)
{
   if (!(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS))
      return false;
   if (module)
      return !module->nir ||
             (device->instance->debug_flags & RADV_DEBUG_DUMP_META_SHADERS);

   return is_gs_copy_shader;
}

bool
radv_can_dump_shader_stats(struct radv_device *device,
                           struct radv_shader_module *module)
{
   /* Only dump non-meta shader stats. */
   return device->instance->debug_flags & RADV_DEBUG_DUMP_SHADER_STATS &&
          module && !module->nir;
}

VkResult radv_CreateShaderModule(
   VkDevice                                    _device,
   const VkShaderModuleCreateInfo*             pCreateInfo,
   const VkAllocationCallbacks*                pAllocator,
   VkShaderModule*                             pShaderModule)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   struct radv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->vk.alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &module->base,
                       VK_OBJECT_TYPE_SHADER_MODULE);

   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = radv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void radv_DestroyShaderModule(
   VkDevice                                    _device,
   VkShaderModule                              _module,
   const VkAllocationCallbacks*                pAllocator)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_shader_module, module, _module);

   if (!module)
      return;

   vk_object_base_finish(&module->base);
   vk_free2(&device->vk.alloc, pAllocator, module);
}

void
radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                  bool allow_copies)
{
   bool progress;
   unsigned lower_flrp =
      (shader->options->lower_flrp16 ? 16 : 0) |
      (shader->options->lower_flrp32 ? 32 : 0) |
      (shader->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;

      NIR_PASS(progress, shader, nir_split_array_vars, nir_var_function_temp);
      NIR_PASS(progress, shader, nir_shrink_vec_array_vars, nir_var_function_temp);

      NIR_PASS_V(shader, nir_lower_vars_to_ssa);
      NIR_PASS_V(shader, nir_lower_pack);

      if (allow_copies) {
         /* Only run this pass in the first call to
          * radv_optimize_nir. Later calls assume that we've
          * lowered away any copy_deref instructions and we
          * don't want to introduce any more.
          */
         NIR_PASS(progress, shader, nir_opt_find_array_copies);
      }

      NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
      NIR_PASS(progress, shader, nir_opt_dead_write_vars);
      NIR_PASS(progress, shader, nir_remove_dead_variables,
               nir_var_function_temp | nir_var_shader_in | nir_var_shader_out,
               NULL);

      NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
      NIR_PASS_V(shader, nir_lower_phis_to_scalar);

      NIR_PASS(progress, shader, nir_copy_prop);
      NIR_PASS(progress, shader, nir_opt_remove_phis);
      NIR_PASS(progress, shader, nir_opt_dce);
      if (nir_opt_trivial_continues(shader)) {
         progress = true;
         NIR_PASS(progress, shader, nir_copy_prop);
         NIR_PASS(progress, shader, nir_opt_remove_phis);
         NIR_PASS(progress, shader, nir_opt_dce);
      }
      NIR_PASS(progress, shader, nir_opt_if, true);
      NIR_PASS(progress, shader, nir_opt_dead_cf);
      NIR_PASS(progress, shader, nir_opt_cse);
      NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
      NIR_PASS(progress, shader, nir_opt_constant_folding);
      NIR_PASS(progress, shader, nir_opt_algebraic);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;
         NIR_PASS(lower_flrp_progress,
                  shader,
                  nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  shader->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, shader,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, shader, nir_opt_undef);
      if (shader->options->max_unroll_iterations) {
         NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
      }
   } while (progress && !optimize_conservatively);

   NIR_PASS(progress, shader, nir_opt_conditional_discard);
   NIR_PASS(progress, shader, nir_opt_shrink_vectors);
   NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
}
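
/* The do/while above ORs each pass's result into `progress` and re-runs the
 * whole pipeline until it reaches a fixpoint (or stops after a single
 * iteration when optimize_conservatively is set). A usage sketch mirroring
 * the two call sites in radv_shader_compile_to_nir() below:
 *
 *    radv_optimize_nir(nir, false, true);   // first call: allow_copies, may
 *                                           // introduce copy_deref via
 *                                           // nir_opt_find_array_copies()
 *    ...copy_deref-sensitive lowering...
 *    radv_optimize_nir(nir, conservative, false);  // later calls must not
 */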

static void
shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
{
   assert(glsl_type_is_vector_or_scalar(type));

   uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
   unsigned length = glsl_get_vector_elements(type);
   *size = comp_size * length;
   *align = comp_size;
}
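
/* Worked example for shared_var_info(): a vec3 of 32-bit floats has
 * comp_size = 32 / 8 = 4 bytes and length = 3, giving *size = 12 and
 * *align = 4. Booleans are special-cased to 4 bytes per component
 * regardless of their bit size.
 */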

struct radv_shader_debug_data {
   struct radv_device *device;
   const struct radv_shader_module *module;
};

static void radv_spirv_nir_debug(void *private_data,
                                 enum nir_spirv_debug_level level,
                                 size_t spirv_offset,
                                 const char *message)
{
   struct radv_shader_debug_data *debug_data = private_data;
   struct radv_instance *instance = debug_data->device->instance;

   static const VkDebugReportFlagsEXT vk_flags[] = {
      [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
      [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
      [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
   };
   char buffer[256];

   snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
            (unsigned long)spirv_offset, message);

   vk_debug_report(&instance->debug_report_callbacks,
                   vk_flags[level],
                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
                   (uint64_t)(uintptr_t)debug_data->module,
                   0, 0, "radv", buffer);
}

static void radv_compiler_debug(void *private_data,
                                enum radv_compiler_debug_level level,
                                const char *message)
{
   struct radv_shader_debug_data *debug_data = private_data;
   struct radv_instance *instance = debug_data->device->instance;

   static const VkDebugReportFlagsEXT vk_flags[] = {
      [RADV_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
      [RADV_COMPILER_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
   };

   /* VK_DEBUG_REPORT_DEBUG_BIT_EXT specifies diagnostic information
    * from the implementation and layers.
    */
   vk_debug_report(&instance->debug_report_callbacks,
                   vk_flags[level] | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
                   (uint64_t)(uintptr_t)debug_data->module,
                   0, 0, "radv", message);
}
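
/* These messages surface to the application through the standard
 * VK_EXT_debug_report machinery, so no RADV-specific setup is needed to
 * receive ACO errors/warnings. A minimal sketch of the application side
 * (callback names are illustrative, not part of this file):
 *
 *    VkDebugReportCallbackCreateInfoEXT ci = {
 *       .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
 *       .flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
 *                VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT |
 *                VK_DEBUG_REPORT_DEBUG_BIT_EXT,
 *       .pfnCallback = my_debug_callback,
 *    };
 *    VkDebugReportCallbackEXT cb;
 *    vkCreateDebugReportCallbackEXT(instance, &ci, NULL, &cb);
 *
 * Because radv_compiler_debug() always ORs in VK_DEBUG_REPORT_DEBUG_BIT_EXT,
 * callbacks that only register for the DEBUG bit will also see these
 * compiler reports.
 */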

nir_shader *
radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
                           const char *entrypoint_name,
                           gl_shader_stage stage,
                           const VkSpecializationInfo *spec_info,
                           const VkPipelineCreateFlags flags,
                           const struct radv_pipeline_layout *layout,
                           unsigned subgroup_size, unsigned ballot_bit_size)
{
   nir_shader *nir;
   const nir_shader_compiler_options *nir_options =
      radv_use_llvm_for_stage(device, stage) ? &nir_options_llvm : &nir_options_aco;

   if (module->nir) {
      /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly. In that case, we just ignore the SPIR-V entirely
       * and use the NIR shader as-is. */
      nir = module->nir;
      nir->options = nir_options;
      nir_validate_shader(nir, "in internal shader");

      assert(exec_list_length(&nir->functions) == 1);
   } else {
      uint32_t *spirv = (uint32_t *) module->data;
      assert(module->size % 4 == 0);

      if (device->instance->debug_flags & RADV_DEBUG_DUMP_SPIRV)
         radv_print_spirv(module->data, module->size, stderr);

      uint32_t num_spec_entries = 0;
      struct nir_spirv_specialization *spec_entries = NULL;
      if (spec_info && spec_info->mapEntryCount > 0) {
         num_spec_entries = spec_info->mapEntryCount;
         spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
         for (uint32_t i = 0; i < num_spec_entries; i++) {
            VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
            const void *data = spec_info->pData + entry.offset;
            assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
            switch (entry.size) {
            case 8:
               spec_entries[i].value.u64 = *(const uint64_t *)data;
               break;
            case 4:
               spec_entries[i].value.u32 = *(const uint32_t *)data;
               break;
            case 2:
               spec_entries[i].value.u16 = *(const uint16_t *)data;
               break;
            case 1:
               spec_entries[i].value.u8 = *(const uint8_t *)data;
               break;
            default:
               assert(!"Invalid spec constant size");
               break;
            }
         }
      }

      struct radv_shader_debug_data spirv_debug_data = {
         .device = device,
         .module = module,
      };
      const struct spirv_to_nir_options spirv_options = {
         .lower_ubo_ssbo_access_to_offsets = true,
         .caps = {
            .amd_fragment_mask = true,
            .amd_gcn_shader = true,
            .amd_image_gather_bias_lod = true,
            .amd_image_read_write_lod = true,
            .amd_shader_ballot = true,
            .amd_shader_explicit_vertex_parameter = true,
            .amd_trinary_minmax = true,
            .demote_to_helper_invocation = true,
            .derivative_group = true,
            .descriptor_array_dynamic_indexing = true,
            .descriptor_array_non_uniform_indexing = true,
            .descriptor_indexing = true,
            .device_group = true,
            .draw_parameters = true,
            .float_controls = true,
            .float16 = device->physical_device->rad_info.has_packed_math_16bit,
            .float32_atomic_add = true,
            .float64 = true,
            .geometry_streams = true,
            .image_ms_array = true,
            .image_read_without_format = true,
            .image_write_without_format = true,
            .int8 = true,
            .int16 = true,
            .int64 = true,
            .int64_atomics = true,
            .min_lod = true,
            .multiview = true,
            .physical_storage_buffer_address = true,
            .post_depth_coverage = true,
            .runtime_descriptor_array = true,
            .shader_clock = true,
            .shader_viewport_index_layer = true,
            .stencil_export = true,
            .storage_8bit = true,
            .storage_16bit = true,
            .storage_image_ms = true,
            .subgroup_arithmetic = true,
            .subgroup_ballot = true,
            .subgroup_basic = true,
            .subgroup_quad = true,
            .subgroup_shuffle = true,
            .subgroup_vote = true,
            .tessellation = true,
            .transform_feedback = true,
            .variable_pointers = true,
            .vk_memory_model = true,
            .vk_memory_model_device_scope = true,
         },
         .ubo_addr_format = nir_address_format_32bit_index_offset,
         .ssbo_addr_format = nir_address_format_32bit_index_offset,
         .phys_ssbo_addr_format = nir_address_format_64bit_global,
         .push_const_addr_format = nir_address_format_logical,
         .shared_addr_format = nir_address_format_32bit_offset,
         .frag_coord_is_sysval = true,
         .debug = {
            .func = radv_spirv_nir_debug,
            .private_data = &spirv_debug_data,
         },
      };
      nir = spirv_to_nir(spirv, module->size / 4,
                         spec_entries, num_spec_entries,
                         stage, entrypoint_name,
                         &spirv_options, nir_options);
      assert(nir->info.stage == stage);
      nir_validate_shader(nir, "after spirv_to_nir");

      free(spec_entries);

      /* We have to lower away local constant initializers right before we
       * inline functions. That way they get properly initialized at the top
       * of the function and not at the top of its caller.
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, nir_inline_functions);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_deref);

      /* Pick off the single entrypoint that we want */
      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
         if (func->is_entrypoint)
            func->name = ralloc_strdup(func, "main");
         else
            exec_node_remove(&func->node);
      }
      assert(exec_list_length(&nir->functions) == 1);

      /* Make sure we lower constant initializers on output variables so that
       * nir_remove_dead_variables below sees the corresponding stores
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);

      /* Now that we've deleted all but the main function, we can go ahead and
       * lower the rest of the constant initializers.
       */
      NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);

      /* Split member structs. We do this before lower_io_to_temporaries so that
       * it doesn't lower system values to temporaries by accident.
       */
      NIR_PASS_V(nir, nir_split_var_copies);
      NIR_PASS_V(nir, nir_split_per_member_structs);

      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          !radv_use_llvm_for_stage(device, nir->info.stage))
         NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
      if (nir->info.stage == MESA_SHADER_FRAGMENT)
         NIR_PASS_V(nir, nir_lower_input_attachments,
                    &(nir_input_attachment_options) {
                       .use_fragcoord_sysval = true,
                       .use_layer_id_sysval = false,
                    });

      NIR_PASS_V(nir, nir_remove_dead_variables,
                 nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
                 NULL);

      NIR_PASS_V(nir, nir_propagate_invariant);

      NIR_PASS_V(nir, nir_lower_system_values);
      NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);

      if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
         NIR_PASS_V(nir, nir_lower_discard_to_demote);

      nir_lower_doubles_options lower_doubles =
         nir->options->lower_doubles_options;

      if (device->physical_device->rad_info.chip_class == GFX6) {
         /* GFX6 doesn't support v_floor_f64 and the precision
          * of v_fract_f64 which is used to implement 64-bit
          * floor is less than what Vulkan requires.
          */
         lower_doubles |= nir_lower_dfloor;
      }

      NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
   }

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->info.stage == MESA_SHADER_GEOMETRY)
      nir_lower_gs_intrinsics(nir, true);

   static const nir_lower_tex_options tex_options = {
      .lower_txp = ~0,
      .lower_tg4_offsets = true,
   };

   nir_lower_tex(nir, &tex_options);

   nir_lower_vars_to_ssa(nir);

   if (nir->info.stage == MESA_SHADER_VERTEX ||
       nir->info.stage == MESA_SHADER_GEOMETRY ||
       nir->info.stage == MESA_SHADER_FRAGMENT) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, true);
   } else if (nir->info.stage == MESA_SHADER_TESS_EVAL) {
      NIR_PASS_V(nir, nir_lower_io_to_temporaries,
                 nir_shader_get_entrypoint(nir), true, false);
   }

   nir_split_var_copies(nir);

   nir_lower_global_vars_to_local(nir);
   nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
   bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
   nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
                          .subgroup_size = subgroup_size,
                          .ballot_bit_size = ballot_bit_size,
                          .lower_to_scalar = 1,
                          .lower_subgroup_masks = 1,
                          .lower_shuffle = 1,
                          .lower_shuffle_to_32bit = 1,
                          .lower_vote_eq_to_ballot = 1,
                          .lower_quad_broadcast_dynamic = 1,
                          .lower_quad_broadcast_dynamic_to_const = gfx7minus,
                          .lower_shuffle_to_swizzle_amd = 1,
                       });

   nir_lower_load_const_to_scalar(nir);

   if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
      radv_optimize_nir(nir, false, true);

   /* Call radv_nir_lower_ycbcr_textures() late, as there might still be
    * tex instructions with an undef texture/sampler before the first
    * optimization. */
   NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);

   /* We call nir_lower_var_copies() after the first radv_optimize_nir()
    * to remove any copies introduced by nir_opt_find_array_copies().
    */
   nir_lower_var_copies(nir);

   /* Lower deref operations for compute shared memory. */
   if (nir->info.stage == MESA_SHADER_COMPUTE) {
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared, shared_var_info);
      NIR_PASS_V(nir, nir_lower_explicit_io,
                 nir_var_mem_shared, nir_address_format_32bit_offset);
   }

   /* Lower large variables that are always constant with load_constant
    * intrinsics, which get turned into PC-relative loads from a data
    * section next to the shader.
    */
   NIR_PASS_V(nir, nir_opt_large_constants,
              glsl_get_natural_size_align_bytes, 16);

   /* Indirect lowering must be called after the radv_optimize_nir() loop
    * has been called at least once. Otherwise indirect lowering can
    * bloat the instruction count of the loop and cause it to be
    * considered too large for unrolling.
    */
   ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
   radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT, false);

   return nir;
}

static int
type_size_vec4(const struct glsl_type *type, bool bindless)
{
   return glsl_count_attribute_slots(type, false);
}

static nir_variable *
find_layer_in_var(nir_shader *nir)
{
   nir_variable *var =
      nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
   if (var != NULL)
      return var;

   var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
   var->data.location = VARYING_SLOT_LAYER;
   var->data.interpolation = INTERP_MODE_FLAT;
   return var;
}

/* We use layered rendering to implement multiview, which means we need to map
 * view_index to gl_Layer. The code generates a load from the layer_id sysval,
 * but since we don't have a way to get at this information from the fragment
 * shader, we also need to lower this to the gl_Layer varying. This pass
 * lowers both to a varying load from the LAYER slot, before lowering io, so
 * that nir_assign_var_locations() will give the LAYER varying the correct
 * driver_location.
 */

static bool
lower_view_index(nir_shader *nir)
{
   bool progress = false;
   nir_function_impl *entry = nir_shader_get_entrypoint(nir);
   nir_builder b;
   nir_builder_init(&b, entry);

   nir_variable *layer = NULL;
   nir_foreach_block(block, entry) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
         if (load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         if (!layer)
            layer = find_layer_in_var(nir);

         b.cursor = nir_before_instr(instr);
         nir_ssa_def *def = nir_load_var(&b, layer);
         nir_ssa_def_rewrite_uses(&load->dest.ssa,
                                  nir_src_for_ssa(def));

         nir_instr_remove(instr);
         progress = true;
      }
   }

   return progress;
}
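
/* Sketch of the rewrite done by lower_view_index() (the NIR spellings here
 * are illustrative only): a use of
 *
 *    ssa_2 = intrinsic load_view_index () ()
 *
 * is replaced by a load of the flat "layer id" input created by
 * find_layer_in_var(), roughly
 *
 *    ssa_2 = intrinsic load_deref (&layer_id) ()
 *
 * so the fragment shader reads the value the earlier stage exported to the
 * LAYER slot instead of a sysval that doesn't exist for fragment shaders.
 */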

void
radv_lower_fs_io(nir_shader *nir)
{
   NIR_PASS_V(nir, lower_view_index);
   nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
                               MESA_SHADER_FRAGMENT);

   NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);

   /* This pass needs actual constants */
   nir_opt_constant_folding(nir);

   NIR_PASS_V(nir, nir_io_add_const_offset_to_base, nir_var_shader_in);
}


static void *
radv_alloc_shader_memory(struct radv_device *device,
                         struct radv_shader_variant *shader)
{
   mtx_lock(&device->shader_slab_mutex);
   list_for_each_entry(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
      uint64_t offset = 0;
      list_for_each_entry(struct radv_shader_variant, s, &slab->shaders, slab_list) {
         if (s->bo_offset - offset >= shader->code_size) {
            shader->bo = slab->bo;
            shader->bo_offset = offset;
            list_addtail(&shader->slab_list, &s->slab_list);
            mtx_unlock(&device->shader_slab_mutex);
            return slab->ptr + offset;
         }
         offset = align_u64(s->bo_offset + s->code_size, 256);
      }
      if (offset <= slab->size && slab->size - offset >= shader->code_size) {
         shader->bo = slab->bo;
         shader->bo_offset = offset;
         list_addtail(&shader->slab_list, &slab->shaders);
         mtx_unlock(&device->shader_slab_mutex);
         return slab->ptr + offset;
      }
   }

   mtx_unlock(&device->shader_slab_mutex);
   struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));

   slab->size = MAX2(256 * 1024, shader->code_size);
   slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
                                        RADEON_DOMAIN_VRAM,
                                        RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                        (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
                                         0 : RADEON_FLAG_READ_ONLY),
                                        RADV_BO_PRIORITY_SHADER);
   if (!slab->bo) {
      free(slab);
      return NULL;
   }

   slab->ptr = (char*)device->ws->buffer_map(slab->bo);
   if (!slab->ptr) {
      device->ws->buffer_destroy(slab->bo);
      free(slab);
      return NULL;
   }

   list_inithead(&slab->shaders);

   mtx_lock(&device->shader_slab_mutex);
   list_add(&slab->slabs, &device->shader_slabs);

   shader->bo = slab->bo;
   shader->bo_offset = 0;
   list_add(&shader->slab_list, &slab->shaders);
   mtx_unlock(&device->shader_slab_mutex);
   return slab->ptr;
}
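
/* radv_alloc_shader_memory() keeps each slab's shaders sorted by bo_offset
 * and does a first-fit scan of the gaps, aligning each shader's end up to
 * 256 bytes. For example (offsets hypothetical), with shaders occupying
 * [0, 1000) and [4096, 8192), a 2048-byte shader lands at
 * align(1000, 256) = 1024, since 4096 - 1024 >= 2048. New slabs are at
 * least 256 KiB, so small shaders rarely force a new buffer allocation.
 */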

void
radv_destroy_shader_slabs(struct radv_device *device)
{
   list_for_each_entry_safe(struct radv_shader_slab, slab, &device->shader_slabs, slabs) {
      device->ws->buffer_destroy(slab->bo);
      free(slab);
   }
   mtx_destroy(&device->shader_slab_mutex);
}

/* For the UMR disassembler. */
#define DEBUGGER_END_OF_CODE_MARKER 0xbf9f0000 /* invalid instruction */
#define DEBUGGER_NUM_MARKERS 5

static unsigned
radv_get_shader_binary_size(size_t code_size)
{
   return code_size + DEBUGGER_NUM_MARKERS * 4;
}

static void radv_postprocess_config(const struct radv_physical_device *pdevice,
                                    const struct ac_shader_config *config_in,
                                    const struct radv_shader_info *info,
                                    gl_shader_stage stage,
                                    struct ac_shader_config *config_out)
{
   bool scratch_enabled = config_in->scratch_bytes_per_wave > 0;
   unsigned vgpr_comp_cnt = 0;
   unsigned num_input_vgprs = info->num_input_vgprs;

   if (stage == MESA_SHADER_FRAGMENT) {
      num_input_vgprs = ac_get_fs_input_vgpr_cnt(config_in, NULL, NULL);
   }

   unsigned num_vgprs = MAX2(config_in->num_vgprs, num_input_vgprs);
   /* +3 for scratch wave offset and VCC */
   unsigned num_sgprs = MAX2(config_in->num_sgprs, info->num_input_sgprs + 3);
   unsigned num_shared_vgprs = config_in->num_shared_vgprs;
   /* shared VGPRs are introduced in Navi and are allocated in blocks of 8 (RDNA ref 3.6.5) */
   assert((pdevice->rad_info.chip_class >= GFX10 && num_shared_vgprs % 8 == 0)
          || (pdevice->rad_info.chip_class < GFX10 && num_shared_vgprs == 0));
   unsigned num_shared_vgpr_blocks = num_shared_vgprs / 8;

   *config_out = *config_in;
   config_out->num_vgprs = num_vgprs;
   config_out->num_sgprs = num_sgprs;
   config_out->num_shared_vgprs = num_shared_vgprs;

   config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
                       S_00B12C_SCRATCH_EN(scratch_enabled);

   if (!pdevice->use_ngg_streamout) {
      config_out->rsrc2 |= S_00B12C_SO_BASE0_EN(!!info->so.strides[0]) |
                           S_00B12C_SO_BASE1_EN(!!info->so.strides[1]) |
                           S_00B12C_SO_BASE2_EN(!!info->so.strides[2]) |
                           S_00B12C_SO_BASE3_EN(!!info->so.strides[3]) |
                           S_00B12C_SO_EN(!!info->so.num_outputs);
   }

   config_out->rsrc1 = S_00B848_VGPRS((num_vgprs - 1) /
                                      (info->wave_size == 32 ? 8 : 4)) |
                       S_00B848_DX10_CLAMP(1) |
                       S_00B848_FLOAT_MODE(config_out->float_mode);

   if (pdevice->rad_info.chip_class >= GFX10) {
      config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(info->num_user_sgprs >> 5);
   } else {
      config_out->rsrc1 |= S_00B228_SGPRS((num_sgprs - 1) / 8);
      config_out->rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX9(info->num_user_sgprs >> 5);
   }

   switch (stage) {
   case MESA_SHADER_TESS_EVAL:
      if (info->is_ngg) {
         config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
         config_out->rsrc2 |= S_00B22C_OC_LDS_EN(1);
      } else if (info->tes.as_es) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;

         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      } else {
         bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
         vgpr_comp_cnt = enable_prim_id ? 3 : 2;

         config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      }
      config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_TESS_CTRL:
      if (pdevice->rad_info.chip_class >= GFX9) {
         /* We need at least 2 components for LS.
          * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         if (pdevice->rad_info.chip_class >= GFX10) {
            vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
            config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks);
         } else {
            vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
            config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks);
         }
      } else {
         config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
      }
      config_out->rsrc1 |= S_00B428_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B42C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_VERTEX:
      if (info->is_ngg) {
         config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      } else if (info->vs.as_ls) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         /* We need at least 2 components for LS.
          * VGPR0-3: (VertexID, RelAutoindex, InstanceID / StepRate0, InstanceID).
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
      } else if (info->vs.as_es) {
         assert(pdevice->rad_info.chip_class <= GFX8);
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
         vgpr_comp_cnt = info->vs.needs_instance_id ? 1 : 0;
      } else {
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, PrimID, InstanceID)
          * If PrimID is disabled, InstanceID / StepRate1 is loaded instead.
          * StepRate0 is set to 1 so that VGPR3 doesn't have to be loaded.
          */
         if (info->vs.needs_instance_id && pdevice->rad_info.chip_class >= GFX10) {
            vgpr_comp_cnt = 3;
         } else if (info->vs.export_prim_id) {
            vgpr_comp_cnt = 2;
         } else if (info->vs.needs_instance_id) {
            vgpr_comp_cnt = 1;
         } else {
            vgpr_comp_cnt = 0;
         }

         config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      }
      config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_FRAGMENT:
      config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B02C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_GEOMETRY:
      config_out->rsrc1 |= S_00B228_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |= S_00B22C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
      break;
   case MESA_SHADER_COMPUTE:
      config_out->rsrc1 |= S_00B848_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10) |
                           S_00B848_WGP_MODE(pdevice->rad_info.chip_class >= GFX10);
      config_out->rsrc2 |=
         S_00B84C_TGID_X_EN(info->cs.uses_block_id[0]) |
         S_00B84C_TGID_Y_EN(info->cs.uses_block_id[1]) |
         S_00B84C_TGID_Z_EN(info->cs.uses_block_id[2]) |
         S_00B84C_TIDIG_COMP_CNT(info->cs.uses_thread_id[2] ? 2 :
                                 info->cs.uses_thread_id[1] ? 1 : 0) |
         S_00B84C_TG_SIZE_EN(info->cs.uses_local_invocation_idx) |
         S_00B84C_LDS_SIZE(config_in->lds_size);
      config_out->rsrc3 |= S_00B8A0_SHARED_VGPR_CNT(num_shared_vgpr_blocks);

      break;
   default:
      unreachable("unsupported shader type");
      break;
   }

   if (pdevice->rad_info.chip_class >= GFX10 && info->is_ngg &&
       (stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL || stage == MESA_SHADER_GEOMETRY)) {
      unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;
      gl_shader_stage es_stage = stage;
      if (stage == MESA_SHADER_GEOMETRY)
         es_stage = info->gs.es_type;

      /* VGPR5-8: (VertexID, UserVGPR0, UserVGPR1, UserVGPR2 / InstanceID) */
      if (es_stage == MESA_SHADER_VERTEX) {
         es_vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 0;
      } else if (es_stage == MESA_SHADER_TESS_EVAL) {
         bool enable_prim_id = info->tes.export_prim_id || info->uses_prim_id;
         es_vgpr_comp_cnt = enable_prim_id ? 3 : 2;
      } else
         unreachable("Unexpected ES shader stage");

      bool tes_triangles = stage == MESA_SHADER_TESS_EVAL &&
                           info->tes.primitive_mode >= 4; /* GL_TRIANGLES */
      if (info->uses_invocation_id || stage == MESA_SHADER_VERTEX) {
         gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
      } else if (info->uses_prim_id) {
         gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
      } else if (info->gs.vertices_in >= 3 || tes_triangles) {
         gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
      } else {
         gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
      }

      config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt) |
                           S_00B228_WGP_MODE(1);
      config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                           S_00B22C_LDS_SIZE(config_in->lds_size) |
                           S_00B22C_OC_LDS_EN(es_stage == MESA_SHADER_TESS_EVAL);
   } else if (pdevice->rad_info.chip_class >= GFX9 &&
              stage == MESA_SHADER_GEOMETRY) {
      unsigned es_type = info->gs.es_type;
      unsigned gs_vgpr_comp_cnt, es_vgpr_comp_cnt;

      if (es_type == MESA_SHADER_VERTEX) {
         /* VGPR0-3: (VertexID, InstanceID / StepRate0, ...) */
         if (info->vs.needs_instance_id) {
            es_vgpr_comp_cnt = pdevice->rad_info.chip_class >= GFX10 ? 3 : 1;
         } else {
            es_vgpr_comp_cnt = 0;
         }
      } else if (es_type == MESA_SHADER_TESS_EVAL) {
         es_vgpr_comp_cnt = info->uses_prim_id ? 3 : 2;
      } else {
         unreachable("invalid shader ES type");
      }

      /* If offsets 4, 5 are used, GS_VGPR_COMP_CNT is ignored and
       * VGPR[0:4] are always loaded.
       */
      if (info->uses_invocation_id) {
         gs_vgpr_comp_cnt = 3; /* VGPR3 contains InvocationID. */
      } else if (info->uses_prim_id) {
         gs_vgpr_comp_cnt = 2; /* VGPR2 contains PrimitiveID. */
      } else if (info->gs.vertices_in >= 3) {
         gs_vgpr_comp_cnt = 1; /* VGPR1 contains offsets 2, 3 */
      } else {
         gs_vgpr_comp_cnt = 0; /* VGPR0 contains offsets 0, 1 */
      }

      config_out->rsrc1 |= S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
      config_out->rsrc2 |= S_00B22C_ES_VGPR_COMP_CNT(es_vgpr_comp_cnt) |
                           S_00B22C_OC_LDS_EN(es_type == MESA_SHADER_TESS_EVAL);
   } else if (pdevice->rad_info.chip_class >= GFX9 &&
              stage == MESA_SHADER_TESS_CTRL) {
      config_out->rsrc1 |= S_00B428_LS_VGPR_COMP_CNT(vgpr_comp_cnt);
   } else {
      config_out->rsrc1 |= S_00B128_VGPR_COMP_CNT(vgpr_comp_cnt);
   }
}
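
/* Worked example for the register encoding above: rsrc1's VGPR field stores
 * allocation granules, not a raw count. For a wave64 shader using 37 VGPRs,
 * S_00B848_VGPRS((37 - 1) / 4) = 9 and the hardware allocates
 * (9 + 1) * 4 = 40 VGPRs; wave32 uses 8-VGPR granules instead. Likewise,
 * pre-GFX10 SGPRs are encoded as (num_sgprs - 1) / 8, i.e. 8-SGPR granules.
 * The numbers are illustrative; the field definitions live in sid.h.
 */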

struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
                           const struct radv_shader_binary *binary,
                           bool keep_shader_info)
{
   struct ac_shader_config config = {0};
   struct ac_rtld_binary rtld_binary = {0};
   struct radv_shader_variant *variant = calloc(1, sizeof(struct radv_shader_variant));
   if (!variant)
      return NULL;

   variant->ref_count = 1;

   if (binary->type == RADV_BINARY_TYPE_RTLD) {
      struct ac_rtld_symbol lds_symbols[2];
      unsigned num_lds_symbols = 0;
      const char *elf_data = (const char *)((struct radv_shader_binary_rtld *)binary)->data;
      size_t elf_size = ((struct radv_shader_binary_rtld *)binary)->elf_size;

      if (device->physical_device->rad_info.chip_class >= GFX9 &&
          (binary->stage == MESA_SHADER_GEOMETRY || binary->info.is_ngg) &&
          !binary->is_gs_copy_shader) {
         /* We add this symbol even on LLVM <= 8 to ensure that
          * shader->config.lds_size is set correctly below.
          */
         struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
         sym->name = "esgs_ring";
         sym->size = binary->info.ngg_info.esgs_ring_size;
         sym->align = 64 * 1024;
      }

      if (binary->info.is_ngg &&
          binary->stage == MESA_SHADER_GEOMETRY) {
         struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
         sym->name = "ngg_emit";
         sym->size = binary->info.ngg_info.ngg_emit_size * 4;
         sym->align = 4;
      }

      struct ac_rtld_open_info open_info = {
         .info = &device->physical_device->rad_info,
         .shader_type = binary->stage,
         .wave_size = binary->info.wave_size,
         .num_parts = 1,
         .elf_ptrs = &elf_data,
         .elf_sizes = &elf_size,
         .num_shared_lds_symbols = num_lds_symbols,
         .shared_lds_symbols = lds_symbols,
      };

      if (!ac_rtld_open(&rtld_binary, open_info)) {
         free(variant);
         return NULL;
      }

      if (!ac_rtld_read_config(&device->physical_device->rad_info,
                               &rtld_binary, &config)) {
         ac_rtld_close(&rtld_binary);
         free(variant);
         return NULL;
      }

      if (rtld_binary.lds_size > 0) {
         unsigned alloc_granularity = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
         config.lds_size = align(rtld_binary.lds_size, alloc_granularity) / alloc_granularity;
      }

      variant->code_size = rtld_binary.rx_size;
      variant->exec_size = rtld_binary.exec_size;
   } else {
      assert(binary->type == RADV_BINARY_TYPE_LEGACY);
      config = ((struct radv_shader_binary_legacy *)binary)->config;
      variant->code_size = radv_get_shader_binary_size(((struct radv_shader_binary_legacy *)binary)->code_size);
      variant->exec_size = ((struct radv_shader_binary_legacy *)binary)->exec_size;
   }

   variant->info = binary->info;
   radv_postprocess_config(device->physical_device, &config, &binary->info,
                           binary->stage, &variant->config);

   void *dest_ptr = radv_alloc_shader_memory(device, variant);
   if (!dest_ptr) {
      if (binary->type == RADV_BINARY_TYPE_RTLD)
         ac_rtld_close(&rtld_binary);
      free(variant);
      return NULL;
   }

   if (binary->type == RADV_BINARY_TYPE_RTLD) {
      struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
      struct ac_rtld_upload_info info = {
         .binary = &rtld_binary,
         .rx_va = radv_buffer_get_va(variant->bo) + variant->bo_offset,
         .rx_ptr = dest_ptr,
      };

      if (!ac_rtld_upload(&info)) {
         radv_shader_variant_destroy(device, variant);
         ac_rtld_close(&rtld_binary);
         return NULL;
      }

      if (keep_shader_info ||
          (device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
         const char *disasm_data;
         size_t disasm_size;
         if (!ac_rtld_get_section_by_name(&rtld_binary, ".AMDGPU.disasm", &disasm_data, &disasm_size)) {
            radv_shader_variant_destroy(device, variant);
            ac_rtld_close(&rtld_binary);
            return NULL;
         }

         variant->ir_string = bin->llvm_ir_size ? strdup((const char*)(bin->data + bin->elf_size)) : NULL;
         variant->disasm_string = malloc(disasm_size + 1);
         memcpy(variant->disasm_string, disasm_data, disasm_size);
         variant->disasm_string[disasm_size] = 0;
      }

      ac_rtld_close(&rtld_binary);
   } else {
      struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
      memcpy(dest_ptr, bin->data + bin->stats_size, bin->code_size);

      /* Add end-of-code markers for the UMR disassembler. */
      uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
      for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
         ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;

      variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size)) : NULL;
      variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size + bin->ir_size)) : NULL;

      if (bin->stats_size) {
         variant->statistics = calloc(bin->stats_size, 1);
         memcpy(variant->statistics, bin->data, bin->stats_size);
      }
   }
   return variant;
}

static char *
radv_dump_nir_shaders(struct nir_shader * const *shaders,
                      int shader_count)
{
   char *data = NULL;
   char *ret = NULL;
   size_t size = 0;
   FILE *f = open_memstream(&data, &size);
   if (f) {
      for (int i = 0; i < shader_count; ++i)
         nir_print_shader(shaders[i], f);
      fclose(f);
   }

   ret = malloc(size + 1);
   if (ret) {
      memcpy(ret, data, size);
      ret[size] = 0;
   }
   free(data);
   return ret;
}

static struct radv_shader_variant *
shader_variant_compile(struct radv_device *device,
                       struct radv_shader_module *module,
                       struct nir_shader * const *shaders,
                       int shader_count,
                       gl_shader_stage stage,
                       struct radv_shader_info *info,
                       struct radv_nir_compiler_options *options,
                       bool gs_copy_shader,
                       bool keep_shader_info,
                       bool keep_statistic_info,
                       struct radv_shader_binary **binary_out)
{
   enum radeon_family chip_family = device->physical_device->rad_info.family;
   struct radv_shader_binary *binary = NULL;

   struct radv_shader_debug_data debug_data = {
      .device = device,
      .module = module,
   };

   options->family = chip_family;
   options->chip_class = device->physical_device->rad_info.chip_class;
   options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
   options->dump_preoptir = options->dump_shader &&
                            device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
   options->record_ir = keep_shader_info;
   options->record_stats = keep_statistic_info;
   options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
   options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
   options->address32_hi = device->physical_device->rad_info.address32_hi;
   options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
   options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
   options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
   options->debug.func = radv_compiler_debug;
   options->debug.private_data = &debug_data;

   struct radv_shader_args args = {};
   args.options = options;
   args.shader_info = info;
   args.is_gs_copy_shader = gs_copy_shader;
   radv_declare_shader_args(&args,
                            gs_copy_shader ? MESA_SHADER_VERTEX
                                           : shaders[shader_count - 1]->info.stage,
                            shader_count >= 2,
                            shader_count >= 2 ? shaders[shader_count - 2]->info.stage
                                              : MESA_SHADER_VERTEX);

   if (radv_use_llvm_for_stage(device, stage) ||
       options->dump_shader || options->record_ir)
      ac_init_llvm_once();

   if (radv_use_llvm_for_stage(device, stage)) {
      llvm_compile_shader(device, shader_count, shaders, &binary, &args);
   } else {
      aco_compile_shader(shader_count, shaders, &binary, &args);
   }

   binary->info = *info;

   struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
                                                                    keep_shader_info);
   if (!variant) {
      free(binary);
      return NULL;
   }

   if (options->dump_shader) {
      fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
      for (int i = 1; i < shader_count; ++i)
         fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));

      fprintf(stderr, "\ndisasm:\n%s\n", variant->disasm_string);
   }

   if (keep_shader_info) {
      variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
      if (!gs_copy_shader && !module->nir) {
         variant->spirv = malloc(module->size);
         if (!variant->spirv) {
            free(variant);
            free(binary);
            return NULL;
         }

         memcpy(variant->spirv, module->data, module->size);
         variant->spirv_size = module->size;
      }
   }

   if (binary_out)
      *binary_out = binary;
   else
      free(binary);

   return variant;
}

struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
                            struct radv_shader_module *module,
                            struct nir_shader *const *shaders,
                            int shader_count,
                            struct radv_pipeline_layout *layout,
                            const struct radv_shader_variant_key *key,
                            struct radv_shader_info *info,
                            bool keep_shader_info, bool keep_statistic_info,
                            struct radv_shader_binary **binary_out)
{
   gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
   struct radv_nir_compiler_options options = {0};

   options.layout = layout;
   if (key)
      options.key = *key;

   options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
   options.robust_buffer_access = device->robust_buffer_access;

   return shader_variant_compile(device, module, shaders, shader_count, stage, info,
                                 &options, false, keep_shader_info, keep_statistic_info, binary_out);
}

struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
                           struct nir_shader *shader,
                           struct radv_shader_info *info,
                           struct radv_shader_binary **binary_out,
                           bool keep_shader_info, bool keep_statistic_info,
                           bool multiview)
{
   struct radv_nir_compiler_options options = {0};
   gl_shader_stage stage = MESA_SHADER_VERTEX;

   options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
   options.key.has_multiview_view_index = multiview;

   return shader_variant_compile(device, NULL, &shader, 1, stage,
                                 info, &options, true, keep_shader_info, keep_statistic_info, binary_out);
}

void
radv_shader_variant_destroy(struct radv_device *device,
                            struct radv_shader_variant *variant)
{
   if (!p_atomic_dec_zero(&variant->ref_count))
      return;

   mtx_lock(&device->shader_slab_mutex);
   list_del(&variant->slab_list);
   mtx_unlock(&device->shader_slab_mutex);

   free(variant->spirv);
   free(variant->nir_string);
   free(variant->disasm_string);
   free(variant->ir_string);
   free(variant->statistics);
   free(variant);
}

const char *
radv_get_shader_name(struct radv_shader_info *info,
                     gl_shader_stage stage)
{
   switch (stage) {
   case MESA_SHADER_VERTEX:
      if (info->vs.as_ls)
         return "Vertex Shader as LS";
      else if (info->vs.as_es)
         return "Vertex Shader as ES";
      else if (info->is_ngg)
         return "Vertex Shader as ESGS";
      else
         return "Vertex Shader as VS";
   case MESA_SHADER_TESS_CTRL:
      return "Tessellation Control Shader";
   case MESA_SHADER_TESS_EVAL:
      if (info->tes.as_es)
         return "Tessellation Evaluation Shader as ES";
      else if (info->is_ngg)
         return "Tessellation Evaluation Shader as ESGS";
      else
         return "Tessellation Evaluation Shader as VS";
   case MESA_SHADER_GEOMETRY:
      return "Geometry Shader";
   case MESA_SHADER_FRAGMENT:
      return "Pixel Shader";
   case MESA_SHADER_COMPUTE:
      return "Compute Shader";
   default:
      return "Unknown shader";
   }
}

unsigned
radv_get_max_workgroup_size(enum chip_class chip_class,
                            gl_shader_stage stage,
                            const unsigned *sizes)
{
   switch (stage) {
   case MESA_SHADER_TESS_CTRL:
      return chip_class >= GFX7 ? 128 : 64;
   case MESA_SHADER_GEOMETRY:
      return chip_class >= GFX9 ? 128 : 64;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      return 0;
   }

   unsigned max_workgroup_size = sizes[0] * sizes[1] * sizes[2];
   return max_workgroup_size;
}

unsigned
radv_get_max_waves(struct radv_device *device,
                   struct radv_shader_variant *variant,
                   gl_shader_stage stage)
{
   enum chip_class chip_class = device->physical_device->rad_info.chip_class;
   unsigned lds_increment = chip_class >= GFX7 ? 512 : 256;
   uint8_t wave_size = variant->info.wave_size;
   struct ac_shader_config *conf = &variant->config;
   unsigned max_simd_waves;
   unsigned lds_per_wave = 0;

   max_simd_waves = device->physical_device->rad_info.max_wave64_per_simd;

   if (stage == MESA_SHADER_FRAGMENT) {
      lds_per_wave = conf->lds_size * lds_increment +
                     align(variant->info.ps.num_interp * 48,
                           lds_increment);
   } else if (stage == MESA_SHADER_COMPUTE) {
      unsigned max_workgroup_size =
         radv_get_max_workgroup_size(chip_class, stage, variant->info.cs.block_size);
      lds_per_wave = (conf->lds_size * lds_increment) /
                     DIV_ROUND_UP(max_workgroup_size, wave_size);
   }

   if (conf->num_sgprs) {
      unsigned sgprs = align(conf->num_sgprs, chip_class >= GFX8 ? 16 : 8);
      max_simd_waves =
         MIN2(max_simd_waves,
              device->physical_device->rad_info.num_physical_sgprs_per_simd /
              sgprs);
   }

   if (conf->num_vgprs) {
      unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
      max_simd_waves =
         MIN2(max_simd_waves,
              device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
   }

   unsigned max_lds_per_simd = device->physical_device->rad_info.lds_size_per_workgroup /
                               device->physical_device->rad_info.num_simd_per_compute_unit;
   if (lds_per_wave)
      max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);

   return max_simd_waves;
}
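
/* Occupancy arithmetic sketch (the hardware limits here are illustrative
 * assumptions, not tied to a specific chip): with 800 physical SGPRs per
 * SIMD on a GFX8-like part, a shader with num_sgprs = 40 is padded to
 * align(40, 16) = 48, limiting it to 800 / 48 = 16 waves; with 256 wave64
 * VGPRs per SIMD, num_vgprs = 65 pads to align(65, 4) = 68, limiting it to
 * 256 / 68 = 3 waves, which then wins the MIN2() chain above (before the
 * LDS term is applied).
 */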

static void
generate_shader_stats(struct radv_device *device,
                      struct radv_shader_variant *variant,
                      gl_shader_stage stage,
                      struct _mesa_string_buffer *buf)
{
   struct ac_shader_config *conf = &variant->config;
   unsigned max_simd_waves = radv_get_max_waves(device, variant, stage);

   if (stage == MESA_SHADER_FRAGMENT) {
      _mesa_string_buffer_printf(buf, "*** SHADER CONFIG ***\n"
                                 "SPI_PS_INPUT_ADDR = 0x%04x\n"
                                 "SPI_PS_INPUT_ENA = 0x%04x\n",
                                 conf->spi_ps_input_addr, conf->spi_ps_input_ena);
   }

   _mesa_string_buffer_printf(buf, "*** SHADER STATS ***\n"
                              "SGPRS: %d\n"
                              "VGPRS: %d\n"
                              "Spilled SGPRs: %d\n"
                              "Spilled VGPRs: %d\n"
                              "PrivMem VGPRS: %d\n"
                              "Code Size: %d bytes\n"
                              "LDS: %d blocks\n"
                              "Scratch: %d bytes per wave\n"
                              "Max Waves: %d\n",
                              conf->num_sgprs, conf->num_vgprs,
                              conf->spilled_sgprs, conf->spilled_vgprs,
                              variant->info.private_mem_vgprs, variant->exec_size,
                              conf->lds_size, conf->scratch_bytes_per_wave,
                              max_simd_waves);

   if (variant->statistics) {
      _mesa_string_buffer_printf(buf, "*** COMPILER STATS ***\n");
      for (unsigned i = 0; i < variant->statistics->count; i++) {
         struct radv_compiler_statistic_info *info = &variant->statistics->infos[i];
         uint32_t value = variant->statistics->values[i];
         _mesa_string_buffer_printf(buf, "%s: %u\n", info->name, value);
      }
   }

   _mesa_string_buffer_printf(buf, "********************\n\n\n");
}

void
radv_shader_dump_stats(struct radv_device *device,
                       struct radv_shader_variant *variant,
                       gl_shader_stage stage,
                       FILE *file)
{
   struct _mesa_string_buffer *buf = _mesa_string_buffer_create(NULL, 256);

   generate_shader_stats(device, variant, stage, buf);

   fprintf(file, "\n%s:\n", radv_get_shader_name(&variant->info, stage));
   fprintf(file, "%s", buf->buf);

   _mesa_string_buffer_destroy(buf);
}

VkResult
radv_GetShaderInfoAMD(VkDevice _device,
                      VkPipeline _pipeline,
                      VkShaderStageFlagBits shaderStage,
                      VkShaderInfoTypeAMD infoType,
                      size_t* pInfoSize,
                      void* pInfo)
{
   RADV_FROM_HANDLE(radv_device, device, _device);
   RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
   gl_shader_stage stage = vk_to_mesa_shader_stage(shaderStage);
   struct radv_shader_variant *variant = pipeline->shaders[stage];
   struct _mesa_string_buffer *buf;
   VkResult result = VK_SUCCESS;

   /* Spec doesn't indicate what to do if the stage is invalid, so just
    * return no info for this. */
   if (!variant)
      return vk_error(device->instance, VK_ERROR_FEATURE_NOT_PRESENT);

   switch (infoType) {
   case VK_SHADER_INFO_TYPE_STATISTICS_AMD:
      if (!pInfo) {
         *pInfoSize = sizeof(VkShaderStatisticsInfoAMD);
      } else {
         unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
         struct ac_shader_config *conf = &variant->config;

         VkShaderStatisticsInfoAMD statistics = {};
         statistics.shaderStageMask = shaderStage;
         statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
         statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
         statistics.numAvailableSgprs = statistics.numPhysicalSgprs;

         if (stage == MESA_SHADER_COMPUTE) {
            unsigned *local_size = variant->info.cs.block_size;
            unsigned workgroup_size = local_size[0] * local_size[1] * local_size[2];

            statistics.numAvailableVgprs = statistics.numPhysicalVgprs /
                                           ceil((double)workgroup_size / statistics.numPhysicalVgprs);

            statistics.computeWorkGroupSize[0] = local_size[0];
            statistics.computeWorkGroupSize[1] = local_size[1];
            statistics.computeWorkGroupSize[2] = local_size[2];
         } else {
            statistics.numAvailableVgprs = statistics.numPhysicalVgprs;
         }

         statistics.resourceUsage.numUsedVgprs = conf->num_vgprs;
         statistics.resourceUsage.numUsedSgprs = conf->num_sgprs;
         statistics.resourceUsage.ldsSizePerLocalWorkGroup = 32768;
         statistics.resourceUsage.ldsUsageSizeInBytes = conf->lds_size * lds_multiplier;
         statistics.resourceUsage.scratchMemUsageInBytes = conf->scratch_bytes_per_wave;

         size_t size = *pInfoSize;
         *pInfoSize = sizeof(statistics);

         memcpy(pInfo, &statistics, MIN2(size, *pInfoSize));

         if (size < *pInfoSize)
            result = VK_INCOMPLETE;
      }

      break;
   case VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD:
      buf = _mesa_string_buffer_create(NULL, 1024);

      _mesa_string_buffer_printf(buf, "%s:\n", radv_get_shader_name(&variant->info, stage));
      _mesa_string_buffer_printf(buf, "%s\n\n", variant->ir_string);
      _mesa_string_buffer_printf(buf, "%s\n\n", variant->disasm_string);
      generate_shader_stats(device, variant, stage, buf);

      /* Need to include the null terminator. */
      size_t length = buf->length + 1;

      if (!pInfo) {
         *pInfoSize = length;
      } else {
         size_t size = *pInfoSize;
         *pInfoSize = length;

         memcpy(pInfo, buf->buf, MIN2(size, length));

         if (size < length)
            result = VK_INCOMPLETE;
      }

      _mesa_string_buffer_destroy(buf);
      break;
   default:
      /* VK_SHADER_INFO_TYPE_BINARY_AMD unimplemented for now. */
      result = VK_ERROR_FEATURE_NOT_PRESENT;
      break;
   }

   return result;
}