X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_shader.c;h=03cfe993bebc83776e1e881c1ffd7e42099e0d44;hp=f6c09866113a5e18690ed4993b7b022c695e879f;hb=b3c822a0a8665ae84452208e94006f7df802f196;hpb=64f2d45c3bdc4cd0e56bb31d17fa94246f4f1e59

diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index f6c09866113..03cfe993beb 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -76,6 +76,7 @@ static const struct nir_shader_compiler_options nir_options_llvm = {
 	.lower_fpow = true,
 	.lower_mul_2x32_64 = true,
 	.lower_rotate = true,
+	.use_scoped_barrier = true,
 	.max_unroll_iterations = 32,
 	.use_interpolated_input_intrinsics = true,
 	/* nir_lower_int64() isn't actually called for the LLVM backend, but
@@ -86,6 +87,10 @@ static const struct nir_shader_compiler_options nir_options_llvm = {
 			       nir_lower_divmod64 |
 			       nir_lower_minmax64 |
 			       nir_lower_iabs64,
+	.lower_doubles_options = nir_lower_drcp |
+				 nir_lower_dsqrt |
+				 nir_lower_drsq |
+				 nir_lower_ddiv,
 };
 
 static const struct nir_shader_compiler_options nir_options_aco = {
@@ -114,15 +119,19 @@ static const struct nir_shader_compiler_options nir_options_aco = {
 	.lower_fpow = true,
 	.lower_mul_2x32_64 = true,
 	.lower_rotate = true,
+	.use_scoped_barrier = true,
 	.max_unroll_iterations = 32,
 	.use_interpolated_input_intrinsics = true,
 	.lower_int64_options = nir_lower_imul64 |
 			       nir_lower_imul_high64 |
 			       nir_lower_imul_2x32_64 |
 			       nir_lower_divmod64 |
-			       nir_lower_logic64 |
 			       nir_lower_minmax64 |
 			       nir_lower_iabs64,
+	.lower_doubles_options = nir_lower_drcp |
+				 nir_lower_dsqrt |
+				 nir_lower_drsq |
+				 nir_lower_ddiv,
 };
 
 bool
@@ -269,13 +278,13 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
 		}
 
 		NIR_PASS(progress, shader, nir_opt_undef);
+		NIR_PASS(progress, shader, nir_opt_shrink_vectors);
 		if (shader->options->max_unroll_iterations) {
 			NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
 		}
 	} while (progress && !optimize_conservatively);
 
 	NIR_PASS(progress, shader, nir_opt_conditional_discard);
-	NIR_PASS(progress, shader, nir_opt_shrink_load);
 	NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
 }
 
@@ -290,6 +299,58 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
 	*align = comp_size;
 }
 
+struct radv_shader_debug_data {
+	struct radv_device *device;
+	const struct radv_shader_module *module;
+};
+
+static void radv_spirv_nir_debug(void *private_data,
+				 enum nir_spirv_debug_level level,
+				 size_t spirv_offset,
+				 const char *message)
+{
+	struct radv_shader_debug_data *debug_data = private_data;
+	struct radv_instance *instance = debug_data->device->instance;
+
+	static const VkDebugReportFlagsEXT vk_flags[] = {
+		[NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+		[NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
+		[NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+	};
+	char buffer[256];
+
+	snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
+		 (unsigned long)spirv_offset, message);
+
+	vk_debug_report(&instance->debug_report_callbacks,
+			vk_flags[level],
+			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+			(uint64_t)(uintptr_t)debug_data->module,
+			0, 0, "radv", buffer);
+}
+
+static void radv_compiler_debug(void *private_data,
+				enum radv_compiler_debug_level level,
+				const char *message)
+{
+	struct radv_shader_debug_data *debug_data = private_data;
+	struct radv_instance *instance = debug_data->device->instance;
+
+	static const VkDebugReportFlagsEXT vk_flags[] = {
+		[RADV_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+		[RADV_COMPILER_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+	};
+
+	/* VK_DEBUG_REPORT_DEBUG_BIT_EXT specifies diagnostic information
+	 * from the implementation and layers.
+	 */
+	vk_debug_report(&instance->debug_report_callbacks,
+			vk_flags[level] | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
+			VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+			(uint64_t)(uintptr_t)debug_data->module,
+			0, 0, "radv", message);
+}
+
 nir_shader *
 radv_shader_compile_to_nir(struct radv_device *device,
 			   struct radv_shader_module *module,
@@ -302,8 +363,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
 {
 	nir_shader *nir;
 	const nir_shader_compiler_options *nir_options =
-		device->physical_device->use_aco ? &nir_options_aco :
-						   &nir_options_llvm;
+		radv_use_llvm_for_stage(device, stage) ? &nir_options_llvm : &nir_options_aco;
 
 	if (module->nir) {
 		/* Some things such as our meta clear/blit code will give us a NIR
@@ -351,6 +411,11 @@ radv_shader_compile_to_nir(struct radv_device *device,
 			}
 		}
 	}
+
+	struct radv_shader_debug_data spirv_debug_data = {
+		.device = device,
+		.module = module,
+	};
 	const struct spirv_to_nir_options spirv_options = {
 		.lower_ubo_ssbo_access_to_offsets = true,
 		.caps = {
@@ -358,7 +423,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
 			.amd_gcn_shader = true,
 			.amd_image_gather_bias_lod = true,
 			.amd_image_read_write_lod = true,
-			.amd_shader_ballot = device->physical_device->use_shader_ballot,
+			.amd_shader_ballot = true,
 			.amd_shader_explicit_vertex_parameter = true,
 			.amd_trinary_minmax = true,
 			.demote_to_helper_invocation = true,
@@ -369,7 +434,8 @@ radv_shader_compile_to_nir(struct radv_device *device,
 			.device_group = true,
 			.draw_parameters = true,
 			.float_controls = true,
-			.float16 = device->physical_device->rad_info.has_packed_math_16bit && !device->physical_device->use_aco,
+			.float16 = device->physical_device->rad_info.has_packed_math_16bit,
+			.float32_atomic_add = true,
 			.float64 = true,
 			.geometry_streams = true,
 			.image_ms_array = true,
@@ -399,6 +465,8 @@ radv_shader_compile_to_nir(struct radv_device *device,
 			.tessellation = true,
 			.transform_feedback = true,
 			.variable_pointers = true,
+			.vk_memory_model = true,
+			.vk_memory_model_device_scope = true,
 		},
 		.ubo_addr_format = nir_address_format_32bit_index_offset,
 		.ssbo_addr_format = nir_address_format_32bit_index_offset,
@@ -406,6 +474,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
 		.push_const_addr_format = nir_address_format_logical,
 		.shared_addr_format = nir_address_format_32bit_offset,
 		.frag_coord_is_sysval = true,
+		.debug = {
+			.func = radv_spirv_nir_debug,
+			.private_data = &spirv_debug_data,
+		},
 	};
 
 	nir = spirv_to_nir(spirv, module->size / 4, spec_entries, num_spec_entries,
@@ -423,6 +495,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
 		NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
 		NIR_PASS_V(nir, nir_lower_returns);
 		NIR_PASS_V(nir, nir_inline_functions);
+		NIR_PASS_V(nir, nir_copy_prop);
 		NIR_PASS_V(nir, nir_opt_deref);
 
 		/* Pick off the single entrypoint that we want */
@@ -451,10 +524,14 @@ radv_shader_compile_to_nir(struct radv_device *device,
 		NIR_PASS_V(nir, nir_split_per_member_structs);
 
 		if (nir->info.stage == MESA_SHADER_FRAGMENT &&
-		    device->physical_device->use_aco)
+		    !radv_use_llvm_for_stage(device, nir->info.stage))
 			NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
 		if (nir->info.stage == MESA_SHADER_FRAGMENT)
-			NIR_PASS_V(nir, nir_lower_input_attachments, true);
+			NIR_PASS_V(nir, nir_lower_input_attachments,
+				   &(nir_input_attachment_options) {
+					.use_fragcoord_sysval = true,
+					.use_layer_id_sysval = false,
+				   });
 
 		NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
 			   NULL);
@@ -464,9 +541,22 @@ radv_shader_compile_to_nir(struct radv_device *device,
 		NIR_PASS_V(nir, nir_lower_system_values);
 		NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
 
-		NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
 		if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
 			NIR_PASS_V(nir, nir_lower_discard_to_demote);
+
+		nir_lower_doubles_options lower_doubles =
+			nir->options->lower_doubles_options;
+
+		if (device->physical_device->rad_info.chip_class == GFX6) {
+			/* GFX6 doesn't support v_floor_f64 and the precision
+			 * of v_fract_f64 which is used to implement 64-bit
+			 * floor is less than what Vulkan requires.
+			 */
+			lower_doubles |= nir_lower_dfloor;
+		}
+
+		NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
 	}
 
 	/* Vulkan uses the separate-shader linking model */
@@ -511,6 +601,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
 			.lower_vote_eq_to_ballot = 1,
 			.lower_quad_broadcast_dynamic = 1,
 			.lower_quad_broadcast_dynamic_to_const = gfx7minus,
+			.lower_shuffle_to_swizzle_amd = 1,
 		});
 
 	nir_lower_load_const_to_scalar(nir);
@@ -518,6 +609,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
 	if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
 		radv_optimize_nir(nir, false, true);
 
+	/* call radv_nir_lower_ycbcr_textures() late as there might still be
+	 * tex with undef texture/sampler before first optimization */
+	NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
 	/* We call nir_lower_var_copies() after the first radv_optimize_nir()
 	 * to remove any copies introduced by nir_opt_find_array_copies().
 	 */
@@ -558,27 +653,24 @@ type_size_vec4(const struct glsl_type *type, bool bindless)
 static nir_variable *
 find_layer_in_var(nir_shader *nir)
 {
-	nir_foreach_variable(var, &nir->inputs) {
-		if (var->data.location == VARYING_SLOT_LAYER) {
-			return var;
-		}
-	}
-
 	nir_variable *var =
-		nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
+		nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
+	if (var != NULL)
+		return var;
+
+	var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
 	var->data.location = VARYING_SLOT_LAYER;
 	var->data.interpolation = INTERP_MODE_FLAT;
 	return var;
 }
 
 /* We use layered rendering to implement multiview, which means we need to map
- * view_index to gl_Layer. The attachment lowering also uses needs to know the
- * layer so that it can sample from the correct layer. The code generates a
- * load from the layer_id sysval, but since we don't have a way to get at this
- * information from the fragment shader, we also need to lower this to the
- * gl_Layer varying. This pass lowers both to a varying load from the LAYER
- * slot, before lowering io, so that nir_assign_var_locations() will give the
- * LAYER varying the correct driver_location.
+ * view_index to gl_Layer. The code generates a load from the layer_id sysval,
+ * but since we don't have a way to get at this information from the fragment
+ * shader, we also need to lower this to the gl_Layer varying. This pass
+ * lowers both to a varying load from the LAYER slot, before lowering io, so
+ * that nir_assign_var_locations() will give the LAYER varying the correct
+ * driver_location.
  */
 
 static bool
@@ -596,8 +688,7 @@ lower_view_index(nir_shader *nir)
 				continue;
 
 			nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
-			if (load->intrinsic != nir_intrinsic_load_view_index &&
-			    load->intrinsic != nir_intrinsic_load_layer_id)
+			if (load->intrinsic != nir_intrinsic_load_view_index)
 				continue;
 
 			if (!layer)
@@ -620,7 +711,7 @@ void
 radv_lower_fs_io(nir_shader *nir)
 {
 	NIR_PASS_V(nir, lower_view_index);
-	nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
+	nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
 				    MESA_SHADER_FRAGMENT);
 
 	NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
@@ -632,7 +723,7 @@ radv_lower_fs_io(nir_shader *nir)
 
 }
 
-void *
+static void *
 radv_alloc_shader_memory(struct radv_device *device,
 			 struct radv_shader_variant *shader)
 {
@@ -668,7 +759,18 @@ radv_alloc_shader_memory(struct radv_device *device,
 					     (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
 					      0 : RADEON_FLAG_READ_ONLY),
 					     RADV_BO_PRIORITY_SHADER);
+	if (!slab->bo) {
+		free(slab);
+		return NULL;
+	}
+
 	slab->ptr = (char*)device->ws->buffer_map(slab->bo);
+	if (!slab->ptr) {
+		device->ws->buffer_destroy(slab->bo);
+		free(slab);
+		return NULL;
+	}
+
 	list_inithead(&slab->shaders);
 
 	mtx_lock(&device->shader_slab_mutex);
@@ -779,8 +881,10 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
 			 */
 			if (pdevice->rad_info.chip_class >= GFX10) {
 				vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
+				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks);
 			} else {
 				vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
+				config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks);
 			}
 		} else {
 			config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
@@ -1005,15 +1109,14 @@ radv_shader_variant_create(struct radv_device *device,
 	radv_postprocess_config(device->physical_device, &config, &binary->info,
 				binary->stage, &variant->config);
 
-	if (radv_device_use_secure_compile(device->instance)) {
+	void *dest_ptr = radv_alloc_shader_memory(device, variant);
+	if (!dest_ptr) {
 		if (binary->type == RADV_BINARY_TYPE_RTLD)
 			ac_rtld_close(&rtld_binary);
-
-		return variant;
+		free(variant);
+		return NULL;
 	}
 
-	void *dest_ptr = radv_alloc_shader_memory(device, variant);
-
 	if (binary->type == RADV_BINARY_TYPE_RTLD) {
 		struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
 		struct ac_rtld_upload_info info = {
@@ -1104,6 +1207,11 @@ shader_variant_compile(struct radv_device *device,
 	enum radeon_family chip_family = device->physical_device->rad_info.family;
 	struct radv_shader_binary *binary = NULL;
 
+	struct radv_shader_debug_data debug_data = {
+		.device = device,
+		.module = module,
+	};
+
 	options->family = chip_family;
 	options->chip_class = device->physical_device->rad_info.chip_class;
 	options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
@@ -1116,6 +1224,9 @@ shader_variant_compile(struct radv_device *device,
 	options->address32_hi = device->physical_device->rad_info.address32_hi;
 	options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
 	options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
+	options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
+	options->debug.func = radv_compiler_debug;
+	options->debug.private_data = &debug_data;
 
 	struct radv_shader_args args = {};
 	args.options = options;
@@ -1128,14 +1239,14 @@ shader_variant_compile(struct radv_device *device,
 			      shader_count >= 2 ?
 			      shaders[shader_count - 2]->info.stage : MESA_SHADER_VERTEX);
 
-	if (!device->physical_device->use_aco ||
+	if (radv_use_llvm_for_stage(device, stage) ||
 	    options->dump_shader || options->record_ir)
 		ac_init_llvm_once();
 
-	if (device->physical_device->use_aco) {
-		aco_compile_shader(shader_count, shaders, &binary, &args);
-	} else {
+	if (radv_use_llvm_for_stage(device, stage)) {
 		llvm_compile_shader(device, shader_count, shaders, &binary, &args);
+	} else {
+		aco_compile_shader(shader_count, shaders, &binary, &args);
 	}
 
 	binary->info = *info;
@@ -1190,16 +1301,17 @@ radv_shader_variant_compile(struct radv_device *device,
 			    bool keep_shader_info, bool keep_statistic_info,
 			    struct radv_shader_binary **binary_out)
 {
+	gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
 	struct radv_nir_compiler_options options = {0};
 
 	options.layout = layout;
 	if (key)
 		options.key = *key;
 
-	options.explicit_scratch_args = device->physical_device->use_aco;
+	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
 	options.robust_buffer_access = device->robust_buffer_access;
 
-	return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
+	return shader_variant_compile(device, module, shaders, shader_count, stage, info,
 				      &options, false, keep_shader_info,
 				      keep_statistic_info, binary_out);
 }
@@ -1212,11 +1324,12 @@ radv_create_gs_copy_shader(struct radv_device *device,
 			   bool multiview)
 {
 	struct radv_nir_compiler_options options = {0};
+	gl_shader_stage stage = MESA_SHADER_VERTEX;
 
-	options.explicit_scratch_args = device->physical_device->use_aco;
+	options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
 	options.key.has_multiview_view_index = multiview;
 
-	return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
+	return shader_variant_compile(device, NULL, &shader, 1, stage,
 				      info, &options, true, keep_shader_info,
 				      keep_statistic_info, binary_out);
 }