radv: report the spirv-nir logs back to the application
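With this change, warnings and errors produced while translating a shader module's SPIR-V to NIR are forwarded through the instance's VK_EXT_debug_report callbacks (see radv_spirv_nir_debug() added below), so an application that registers such a callback will receive them. As a rough sketch only, not part of this patch: the helper below shows how an application might register a callback to print these messages; the names spirv_log_cb and register_spirv_log_cb are illustrative, and it assumes the VkInstance was created with VK_EXT_debug_report enabled.

#include <stdio.h>
#include <vulkan/vulkan.h>

/* Prints every report the driver emits, including the
 * "SPIR-V offset N: ..." messages forwarded by radv_spirv_nir_debug(). */
static VKAPI_ATTR VkBool32 VKAPI_CALL
spirv_log_cb(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT obj_type,
             uint64_t object, size_t location, int32_t msg_code,
             const char *layer_prefix, const char *message, void *user_data)
{
   fprintf(stderr, "[%s] %s\n", layer_prefix, message);
   return VK_FALSE; /* do not abort the call that triggered the report */
}

static VkDebugReportCallbackEXT
register_spirv_log_cb(VkInstance instance)
{
   /* Assumes VK_EXT_debug_report was enabled at instance creation. */
   const VkDebugReportCallbackCreateInfoEXT info = {
      .sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
      .flags = VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
               VK_DEBUG_REPORT_WARNING_BIT_EXT |
               VK_DEBUG_REPORT_ERROR_BIT_EXT,
      .pfnCallback = spirv_log_cb,
   };
   PFN_vkCreateDebugReportCallbackEXT create_cb =
      (PFN_vkCreateDebugReportCallbackEXT)
         vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");
   VkDebugReportCallbackEXT callback = VK_NULL_HANDLE;
   if (create_cb)
      create_cb(instance, &info, NULL, &callback);
   return callback;
}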
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index a53100fd48bd6685a30788895f2543d675a64a48..f3ea88b4e814086f992addd70659709d6147ca7b 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -76,6 +76,7 @@ static const struct nir_shader_compiler_options nir_options_llvm = {
        .lower_fpow = true,
        .lower_mul_2x32_64 = true,
        .lower_rotate = true,
+       .use_scoped_barrier = true,
        .max_unroll_iterations = 32,
        .use_interpolated_input_intrinsics = true,
        /* nir_lower_int64() isn't actually called for the LLVM backend, but
@@ -118,6 +119,7 @@ static const struct nir_shader_compiler_options nir_options_aco = {
        .lower_fpow = true,
        .lower_mul_2x32_64 = true,
        .lower_rotate = true,
+       .use_scoped_barrier = true,
        .max_unroll_iterations = 32,
        .use_interpolated_input_intrinsics = true,
        .lower_int64_options = nir_lower_imul64 |
@@ -282,7 +284,7 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
         } while (progress && !optimize_conservatively);
 
        NIR_PASS(progress, shader, nir_opt_conditional_discard);
-        NIR_PASS(progress, shader, nir_opt_shrink_load);
+        NIR_PASS(progress, shader, nir_opt_shrink_vectors);
         NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
 }
 
@@ -297,6 +299,36 @@ shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
        *align = comp_size;
 }
 
+struct radv_spirv_debug_data {
+       struct radv_device *device;
+       const struct radv_shader_module *module;
+};
+
+static void radv_spirv_nir_debug(void *private_data,
+                                enum nir_spirv_debug_level level,
+                                size_t spirv_offset,
+                                const char *message)
+{
+       struct radv_spirv_debug_data *debug_data = private_data;
+       struct radv_instance *instance = debug_data->device->instance;
+
+       static const VkDebugReportFlagsEXT vk_flags[] = {
+               [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+               [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
+               [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+       };
+       char buffer[256];
+
+       snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
+                (unsigned long)spirv_offset, message);
+
+       vk_debug_report(&instance->debug_report_callbacks,
+                       vk_flags[level],
+                       VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+                       (uint64_t)(uintptr_t)debug_data->module,
+                       0, 0, "radv", buffer);
+}
+
 nir_shader *
 radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
@@ -309,8 +341,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
 {
        nir_shader *nir;
        const nir_shader_compiler_options *nir_options =
-               device->physical_device->use_aco ? &nir_options_aco :
-                                                  &nir_options_llvm;
+               radv_use_llvm_for_stage(device, stage) ? &nir_options_llvm : &nir_options_aco;
 
        if (module->nir) {
                /* Some things such as our meta clear/blit code will give us a NIR
@@ -358,6 +389,11 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                }
                        }
                }
+
+               struct radv_spirv_debug_data spirv_debug_data = {
+                       .device = device,
+                       .module = module,
+               };
                const struct spirv_to_nir_options spirv_options = {
                        .lower_ubo_ssbo_access_to_offsets = true,
                        .caps = {
@@ -365,7 +401,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .amd_gcn_shader = true,
                                .amd_image_gather_bias_lod = true,
                                .amd_image_read_write_lod = true,
-                               .amd_shader_ballot = device->physical_device->use_shader_ballot,
+                               .amd_shader_ballot = true,
                                .amd_shader_explicit_vertex_parameter = true,
                                .amd_trinary_minmax = true,
                                .demote_to_helper_invocation = true,
@@ -377,6 +413,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .draw_parameters = true,
                                .float_controls = true,
                                .float16 = device->physical_device->rad_info.has_packed_math_16bit,
+                               .float32_atomic_add = true,
                                .float64 = true,
                                .geometry_streams = true,
                                .image_ms_array = true,
@@ -406,6 +443,8 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .tessellation = true,
                                .transform_feedback = true,
                                .variable_pointers = true,
+                               .vk_memory_model = true,
+                               .vk_memory_model_device_scope = true,
                        },
                        .ubo_addr_format = nir_address_format_32bit_index_offset,
                        .ssbo_addr_format = nir_address_format_32bit_index_offset,
@@ -413,6 +452,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
                        .push_const_addr_format = nir_address_format_logical,
                        .shared_addr_format = nir_address_format_32bit_offset,
                        .frag_coord_is_sysval = true,
+                       .debug = {
+                               .func = radv_spirv_nir_debug,
+                               .private_data = &spirv_debug_data,
+                       },
                };
                nir = spirv_to_nir(spirv, module->size / 4,
                                   spec_entries, num_spec_entries,
@@ -430,6 +473,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
                NIR_PASS_V(nir, nir_lower_returns);
                NIR_PASS_V(nir, nir_inline_functions);
+               NIR_PASS_V(nir, nir_copy_prop);
                NIR_PASS_V(nir, nir_opt_deref);
 
                /* Pick off the single entrypoint that we want */
@@ -458,7 +502,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                NIR_PASS_V(nir, nir_split_per_member_structs);
 
                if (nir->info.stage == MESA_SHADER_FRAGMENT &&
-                   device->physical_device->use_aco)
+                   !radv_use_llvm_for_stage(device, nir->info.stage))
                         NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
                if (nir->info.stage == MESA_SHADER_FRAGMENT)
                        NIR_PASS_V(nir, nir_lower_input_attachments, true);
@@ -471,13 +515,21 @@ radv_shader_compile_to_nir(struct radv_device *device,
 
                NIR_PASS_V(nir, nir_lower_system_values);
                NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
-               NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
                if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
                        NIR_PASS_V(nir, nir_lower_discard_to_demote);
 
                nir_lower_doubles_options lower_doubles =
                        nir->options->lower_doubles_options;
 
+               if (device->physical_device->rad_info.chip_class == GFX6) {
+                       /* GFX6 doesn't support v_floor_f64 and the precision
+                        * of v_fract_f64 which is used to implement 64-bit
+                        * floor is less than what Vulkan requires.
+                        */
+                       lower_doubles |= nir_lower_dfloor;
+               }
+
                NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
        }
 
@@ -523,6 +575,7 @@ radv_shader_compile_to_nir(struct radv_device *device,
                        .lower_vote_eq_to_ballot = 1,
                        .lower_quad_broadcast_dynamic = 1,
                        .lower_quad_broadcast_dynamic_to_const = gfx7minus,
+                       .lower_shuffle_to_swizzle_amd = 1,
                });
 
        nir_lower_load_const_to_scalar(nir);
@@ -530,6 +583,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
        if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
                radv_optimize_nir(nir, false, true);
 
+       /* call radv_nir_lower_ycbcr_textures() late as there might still be
+        * tex with undef texture/sampler before first optimization */
+       NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
        /* We call nir_lower_var_copies() after the first radv_optimize_nir()
         * to remove any copies introduced by nir_opt_find_array_copies().
         */
@@ -570,14 +627,12 @@ type_size_vec4(const struct glsl_type *type, bool bindless)
 static nir_variable *
 find_layer_in_var(nir_shader *nir)
 {
-       nir_foreach_variable(var, &nir->inputs) {
-               if (var->data.location == VARYING_SLOT_LAYER) {
-                       return var;
-               }
-       }
-
        nir_variable *var =
-               nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
+               nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
+       if (var != NULL)
+               return var;
+
+       var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
        var->data.location = VARYING_SLOT_LAYER;
        var->data.interpolation = INTERP_MODE_FLAT;
        return var;
@@ -632,7 +687,7 @@ void
 radv_lower_fs_io(nir_shader *nir)
 {
        NIR_PASS_V(nir, lower_view_index);
-       nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
+       nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
                                    MESA_SHADER_FRAGMENT);
 
        NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
@@ -802,8 +857,10 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
                         */
                        if (pdevice->rad_info.chip_class >= GFX10) {
                                vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
+                               config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks);
                        } else {
                                vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
+                               config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks);
                        }
                } else {
                        config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
@@ -1028,13 +1085,6 @@ radv_shader_variant_create(struct radv_device *device,
        radv_postprocess_config(device->physical_device, &config, &binary->info,
                                binary->stage, &variant->config);
 
-       if (radv_device_use_secure_compile(device->instance)) {
-               if (binary->type == RADV_BINARY_TYPE_RTLD)
-                       ac_rtld_close(&rtld_binary);
-
-               return variant;
-       }
-
        void *dest_ptr = radv_alloc_shader_memory(device, variant);
        if (!dest_ptr) {
                if (binary->type == RADV_BINARY_TYPE_RTLD)
@@ -1158,14 +1208,14 @@ shader_variant_compile(struct radv_device *device,
                                 shader_count >= 2 ? shaders[shader_count - 2]->info.stage
                                                   : MESA_SHADER_VERTEX);
 
-       if (!device->physical_device->use_aco ||
+       if (radv_use_llvm_for_stage(device, stage) ||
            options->dump_shader || options->record_ir)
                ac_init_llvm_once();
 
-       if (device->physical_device->use_aco) {
-               aco_compile_shader(shader_count, shaders, &binary, &args);
-       } else {
+       if (radv_use_llvm_for_stage(device, stage)) {
                llvm_compile_shader(device, shader_count, shaders, &binary, &args);
+       } else {
+               aco_compile_shader(shader_count, shaders, &binary, &args);
        }
 
        binary->info = *info;
@@ -1220,16 +1270,17 @@ radv_shader_variant_compile(struct radv_device *device,
                           bool keep_shader_info, bool keep_statistic_info,
                           struct radv_shader_binary **binary_out)
 {
+       gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
        struct radv_nir_compiler_options options = {0};
 
        options.layout = layout;
        if (key)
                options.key = *key;
 
-       options.explicit_scratch_args = device->physical_device->use_aco;
+       options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
        options.robust_buffer_access = device->robust_buffer_access;
 
-       return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
+       return shader_variant_compile(device, module, shaders, shader_count, stage, info,
                                     &options, false, keep_shader_info, keep_statistic_info, binary_out);
 }
 
@@ -1242,11 +1293,12 @@ radv_create_gs_copy_shader(struct radv_device *device,
                           bool multiview)
 {
        struct radv_nir_compiler_options options = {0};
+       gl_shader_stage stage = MESA_SHADER_VERTEX;
 
-       options.explicit_scratch_args = device->physical_device->use_aco;
+       options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
        options.key.has_multiview_view_index = multiview;
 
-       return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
+       return shader_variant_compile(device, NULL, &shader, 1, stage,
                                      info, &options, true, keep_shader_info, keep_statistic_info, binary_out);
 }