ac_lower_indirect_derefs(ordered_shaders[i],
pipeline->device->physical_device->rad_info.chip_class);
}
- radv_optimize_nir(ordered_shaders[i]);
+ radv_optimize_nir(ordered_shaders[i], false);
if (nir_lower_global_vars_to_local(ordered_shaders[i - 1])) {
ac_lower_indirect_derefs(ordered_shaders[i - 1],
pipeline->device->physical_device->rad_info.chip_class);
}
- radv_optimize_nir(ordered_shaders[i - 1]);
+ radv_optimize_nir(ordered_shaders[i - 1], false);
}
}
}
struct radv_pipeline_key key;
memset(&key, 0, sizeof(key));
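+ /* VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT means the app accepts an
+  * unoptimized pipeline in exchange for faster creation; record that in
+  * the pipeline key. */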
+ if (pCreateInfo->flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)
+ key.optimisations_disabled = 1;
+
key.has_multiview_view_index = has_view_index;
uint32_t binding_input_rate = 0;
struct radv_device *device,
struct radv_pipeline_cache *cache,
struct radv_pipeline_key key,
- const VkPipelineShaderStageCreateInfo **pStages)
+ const VkPipelineShaderStageCreateInfo **pStages,
+ const VkPipelineCreateFlags flags)
{
struct radv_shader_module fs_m = {0};
struct radv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
nir[i] = radv_shader_compile_to_nir(device, modules[i],
stage ? stage->pName : "main", i,
- stage ? stage->pSpecializationInfo : NULL);
+ stage ? stage->pSpecializationInfo : NULL,
+ flags);
pipeline->active_stages |= mesa_to_vk_shader_stage(i);
/* We don't want to alter meta shaders IR directly so clone it first. */
if (i != last)
mask = mask | nir_var_shader_out;
- nir_lower_io_to_scalar_early(nir[i], mask);
- radv_optimize_nir(nir[i]);
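+ /* Scalarizing IO early only exists to feed later optimization
+  * passes, so it is skipped together with them. */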
+ if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT)) {
+ nir_lower_io_to_scalar_early(nir[i], mask);
+ radv_optimize_nir(nir[i], false);
+ }
}
}
merge_tess_info(&nir[MESA_SHADER_TESS_EVAL]->info, &nir[MESA_SHADER_TESS_CTRL]->info);
}
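+ /* Cross-stage linking (e.g. stripping unused varyings) is itself an
+  * optimization, so honor the flag here as well. */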
- radv_link_shaders(pipeline, nir);
+ if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
+ radv_link_shaders(pipeline, nir);
for (int i = 0; i < MESA_SHADER_STAGES; ++i) {
if (modules[i] && radv_can_dump_shader(device, modules[i]))
radv_create_shaders(pipeline, device, cache,
radv_generate_graphics_pipeline_key(pipeline, pCreateInfo, &blend, has_view_index),
- pStages);
+ pStages, pCreateInfo->flags);
pipeline->graphics.spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1);
radv_pipeline_init_multisample_state(pipeline, &blend, pCreateInfo);
assert(pipeline->layout);
pStages[MESA_SHADER_COMPUTE] = &pCreateInfo->stage;
- radv_create_shaders(pipeline, device, cache, (struct radv_pipeline_key) {0}, pStages);
+ radv_create_shaders(pipeline, device, cache, (struct radv_pipeline_key) {0}, pStages, pCreateInfo->flags);
pipeline->user_data_0[MESA_SHADER_COMPUTE] = radv_pipeline_stage_to_user_data_0(pipeline, MESA_SHADER_COMPUTE, device->physical_device->rad_info.chip_class);
pipeline->need_indirect_descriptor_sets |= pipeline->shaders[MESA_SHADER_COMPUTE]->info.need_indirect_descriptor_sets;
}
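+ /* If optimize_conservatively is set, run the optimization loop only
+  * once instead of iterating until no pass makes progress. */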
void
-radv_optimize_nir(struct nir_shader *shader)
+radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively)
{
bool progress;
if (shader->options->max_unroll_iterations) {
NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
}
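+ /* In conservative mode a single iteration of this loop is enough. */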
- } while (progress);
+ } while (progress && !optimize_conservatively);
NIR_PASS(progress, shader, nir_opt_shrink_load);
NIR_PASS(progress, shader, nir_opt_move_load_ubo);
struct radv_shader_module *module,
const char *entrypoint_name,
gl_shader_stage stage,
- const VkSpecializationInfo *spec_info)
+ const VkSpecializationInfo *spec_info,
+ const VkPipelineCreateFlags flags)
{
if (strcmp(entrypoint_name, "main") != 0) {
radv_finishme("Multiple shaders per module not really supported");
.lower_vote_eq_to_ballot = 1,
});
- radv_optimize_nir(nir);
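+ /* Skip the initial optimization loop when the app disabled
+  * optimizations; a single conservative cleanup still runs after
+  * indirect lowering below. */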
+ if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
+ radv_optimize_nir(nir, false);
/* Indirect lowering must be called after the radv_optimize_nir() loop
 * has been called at least once. Otherwise indirect lowering can
 * bloat the instruction count of the loop and cause it to be
 * considered too large for unrolling.
 */
ac_lower_indirect_derefs(nir, device->physical_device->rad_info.chip_class);
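+ /* Always run at least one cleanup iteration after indirect lowering;
+  * with optimizations disabled this stays conservative (one pass). */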
- radv_optimize_nir(nir);
+ radv_optimize_nir(nir, flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT);
return nir;
}
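
For context, a minimal sketch of how an application opts into this path
through the Vulkan API (the `device`, `cache` and `info` handles here are
hypothetical and assumed to be created/filled elsewhere):

    VkGraphicsPipelineCreateInfo info = { /* stages, state, layout, ... */ };
    info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    /* Ask the driver to skip expensive shader optimizations. */
    info.flags |= VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;

    VkPipeline pipeline;
    vkCreateGraphicsPipelines(device, cache, 1, &info, NULL, &pipeline);

The same flag on VkComputePipelineCreateInfo takes the compute path above.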