This allows enabling shader info keeping on a per-shader basis.
It also disables the pipeline cache on a per-shader basis.
Reviewed-by: Dave Airlie <airlied@redhat.com>
struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
struct radv_shader_variant_key keys[MESA_SHADER_STAGES] = {{{{{0}}}}};
unsigned char hash[20], gs_copy_hash[20];
+ bool keep_executable_info = (flags & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR) || device->keep_shader_info;
radv_start_feedback(pipeline_feedback);
gs_copy_hash[0] ^= 1;
bool found_in_application_cache = true;
- if (modules[MESA_SHADER_GEOMETRY]) {
+ if (modules[MESA_SHADER_GEOMETRY] && !keep_executable_info) {
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
radv_create_shader_variants_from_pipeline_cache(device, cache, gs_copy_hash, variants,
&found_in_application_cache);
pipeline->gs_copy_shader = variants[MESA_SHADER_GEOMETRY];
}
- if (radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
+ if (!keep_executable_info &&
+ radv_create_shader_variants_from_pipeline_cache(device, cache, hash, pipeline->shaders,
&found_in_application_cache) &&
(!modules[MESA_SHADER_GEOMETRY] || pipeline->gs_copy_shader)) {
radv_stop_feedback(pipeline_feedback, found_in_application_cache);
pipeline->shaders[MESA_SHADER_FRAGMENT] =
radv_shader_variant_compile(device, modules[MESA_SHADER_FRAGMENT], &nir[MESA_SHADER_FRAGMENT], 1,
pipeline->layout, keys + MESA_SHADER_FRAGMENT,
- &binaries[MESA_SHADER_FRAGMENT]);
+ keep_executable_info, &binaries[MESA_SHADER_FRAGMENT]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_FRAGMENT], false);
}
pipeline->shaders[MESA_SHADER_TESS_CTRL] = radv_shader_variant_compile(device, modules[MESA_SHADER_TESS_CTRL], combined_nir, 2,
pipeline->layout,
- &key, &binaries[MESA_SHADER_TESS_CTRL]);
+ &key, keep_executable_info,
+ &binaries[MESA_SHADER_TESS_CTRL]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_TESS_CTRL], false);
}
pipeline->shaders[MESA_SHADER_GEOMETRY] = radv_shader_variant_compile(device, modules[MESA_SHADER_GEOMETRY], combined_nir, 2,
pipeline->layout,
- &keys[pre_stage] , &binaries[MESA_SHADER_GEOMETRY]);
+ &keys[pre_stage], keep_executable_info,
+ &binaries[MESA_SHADER_GEOMETRY]);
radv_stop_feedback(stage_feedbacks[MESA_SHADER_GEOMETRY], false);
}
pipeline->shaders[i] = radv_shader_variant_compile(device, modules[i], &nir[i], 1,
pipeline->layout,
- keys + i, &binaries[i]);
+ keys + i, keep_executable_info,
+ &binaries[i]);
radv_stop_feedback(stage_feedbacks[i], false);
}
!radv_pipeline_has_ngg(pipeline)) {
pipeline->gs_copy_shader = radv_create_gs_copy_shader(
device, nir[MESA_SHADER_GEOMETRY], &gs_copy_binary,
+ keep_executable_info,
keys[MESA_SHADER_GEOMETRY].has_multiview_view_index);
}
- if (pipeline->gs_copy_shader) {
+ if (!keep_executable_info && pipeline->gs_copy_shader) {
struct radv_shader_binary *binaries[MESA_SHADER_STAGES] = {NULL};
struct radv_shader_variant *variants[MESA_SHADER_STAGES] = {0};
free(gs_copy_binary);
}
- radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
- binaries);
+ if (!keep_executable_info) {
+ radv_pipeline_cache_insert_shaders(device, cache, hash, pipeline->shaders,
+ binaries);
+ }
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
free(binaries[i]);
* cache. Shaders that need to keep executable/debug info now bypass the
* cache per-pipeline (see keep_executable_info) instead of disabling it
* device-wide, since cached shaders carry no debug info. */
if (cache->hash_table == NULL ||
- (device->instance->debug_flags & RADV_DEBUG_NO_CACHE) ||
- device->keep_shader_info)
+ (device->instance->debug_flags & RADV_DEBUG_NO_CACHE))
cache->table_size = 0;
else
memset(cache->hash_table, 0, byte_size);
/* Pipeline caches can be disabled with RADV_DEBUG=nocache or with
 * MESA_GLSL_CACHE_DISABLE=1. Shaders that must keep executable info
 * skip the cache per-pipeline instead of disabling it here.
 */
- return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE) ||
- device->keep_shader_info;
+ return (device->instance->debug_flags & RADV_DEBUG_NO_CACHE);
}
bool
memcpy(binary, p, entry->binary_sizes[i]);
p += entry->binary_sizes[i];
- entry->variants[i] = radv_shader_variant_create(device, binary);
+ entry->variants[i] = radv_shader_variant_create(device, binary, false);
free(binary);
} else if (entry->binary_sizes[i]) {
p += entry->binary_sizes[i];
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
- const struct radv_shader_binary *binary)
+ const struct radv_shader_binary *binary,
+ bool keep_shader_info)
{
struct ac_shader_config config = {0};
struct ac_rtld_binary rtld_binary = {0};
return NULL;
}
- if (device->keep_shader_info ||
+ if (keep_shader_info ||
(device->instance->debug_flags & RADV_DEBUG_DUMP_SHADERS)) {
const char *disasm_data;
size_t disasm_size;
gl_shader_stage stage,
struct radv_nir_compiler_options *options,
bool gs_copy_shader,
+ bool keep_shader_info,
struct radv_shader_binary **binary_out)
{
enum radeon_family chip_family = device->physical_device->rad_info.family;
options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
options->dump_preoptir = options->dump_shader &&
device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
- options->record_llvm_ir = device->keep_shader_info;
+ options->record_llvm_ir = keep_shader_info;
options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
options->address32_hi = device->physical_device->rad_info.address32_hi;
radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
- struct radv_shader_variant *variant = radv_shader_variant_create(device, binary);
+ struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
+ keep_shader_info);
if (!variant) {
free(binary);
return NULL;
}
- if (device->keep_shader_info) {
+ if (keep_shader_info) {
variant->nir_string = radv_dump_nir_shaders(shaders, shader_count);
if (!gs_copy_shader && !module->nir) {
variant->spirv = (uint32_t *)module->data;
int shader_count,
struct radv_pipeline_layout *layout,
const struct radv_shader_variant_key *key,
+ bool keep_shader_info,
struct radv_shader_binary **binary_out)
{
struct radv_nir_compiler_options options = {0};
options.robust_buffer_access = device->robust_buffer_access;
return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage,
- &options, false, binary_out);
+ &options, false, keep_shader_info, binary_out);
}
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device,
struct nir_shader *shader,
struct radv_shader_binary **binary_out,
+ bool keep_shader_info,
bool multiview)
{
struct radv_nir_compiler_options options = {0};
options.key.has_multiview_view_index = multiview;
return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
- &options, true, binary_out);
+ &options, true, keep_shader_info, binary_out);
}
void
struct radv_shader_variant *
radv_shader_variant_create(struct radv_device *device,
- const struct radv_shader_binary *binary);
+ const struct radv_shader_binary *binary,
+ bool keep_shader_info);
struct radv_shader_variant *
radv_shader_variant_compile(struct radv_device *device,
struct radv_shader_module *module,
int shader_count,
struct radv_pipeline_layout *layout,
const struct radv_shader_variant_key *key,
+ bool keep_shader_info,
struct radv_shader_binary **binary_out);
struct radv_shader_variant *
radv_create_gs_copy_shader(struct radv_device *device, struct nir_shader *nir,
struct radv_shader_binary **binary_out,
- bool multiview);
+ bool keep_shader_info, bool multiview);
void
radv_shader_variant_destroy(struct radv_device *device,