radv: Move nir_opt_shrink_vectors() into the opt loop.
[mesa.git] / src / amd / vulkan / radv_shader.c
index c841a2f072623aab55372437d716eaf39f654d44..03cfe993bebc83776e1e881c1ffd7e42099e0d44 100644 (file)
 #include "radv_private.h"
 #include "radv_shader.h"
 #include "radv_shader_helper.h"
+#include "radv_shader_args.h"
 #include "nir/nir.h"
 #include "nir/nir_builder.h"
 #include "spirv/nir_spirv.h"
 
-#include <llvm-c/Core.h>
-#include <llvm-c/TargetMachine.h>
-#include <llvm-c/Support.h>
-
 #include "sid.h"
 #include "ac_binary.h"
 #include "ac_llvm_util.h"
@@ -79,8 +76,21 @@ static const struct nir_shader_compiler_options nir_options_llvm = {
        .lower_fpow = true,
        .lower_mul_2x32_64 = true,
        .lower_rotate = true,
+       .use_scoped_barrier = true,
        .max_unroll_iterations = 32,
        .use_interpolated_input_intrinsics = true,
+       /* nir_lower_int64() isn't actually called for the LLVM backend, but
+        * this helps the loop unrolling heuristics. */
+       .lower_int64_options = nir_lower_imul64 |
+                               nir_lower_imul_high64 |
+                               nir_lower_imul_2x32_64 |
+                               nir_lower_divmod64 |
+                               nir_lower_minmax64 |
+                               nir_lower_iabs64,
+       .lower_doubles_options = nir_lower_drcp |
+                                nir_lower_dsqrt |
+                                nir_lower_drsq |
+                                nir_lower_ddiv,
 };
 
 static const struct nir_shader_compiler_options nir_options_aco = {
@@ -109,8 +119,19 @@ static const struct nir_shader_compiler_options nir_options_aco = {
        .lower_fpow = true,
        .lower_mul_2x32_64 = true,
        .lower_rotate = true,
+       .use_scoped_barrier = true,
        .max_unroll_iterations = 32,
        .use_interpolated_input_intrinsics = true,
+       .lower_int64_options = nir_lower_imul64 |
+                               nir_lower_imul_high64 |
+                               nir_lower_imul_2x32_64 |
+                               nir_lower_divmod64 |
+                               nir_lower_minmax64 |
+                               nir_lower_iabs64,
+       .lower_doubles_options = nir_lower_drcp |
+                                nir_lower_dsqrt |
+                                nir_lower_drsq |
+                                nir_lower_ddiv,
 };
 
 bool
@@ -136,29 +157,6 @@ radv_can_dump_shader_stats(struct radv_device *device,
               module && !module->nir;
 }
 
-unsigned shader_io_get_unique_index(gl_varying_slot slot)
-{
-       /* handle patch indices separate */
-       if (slot == VARYING_SLOT_TESS_LEVEL_OUTER)
-               return 0;
-       if (slot == VARYING_SLOT_TESS_LEVEL_INNER)
-               return 1;
-       if (slot >= VARYING_SLOT_PATCH0 && slot <= VARYING_SLOT_TESS_MAX)
-               return 2 + (slot - VARYING_SLOT_PATCH0);
-       if (slot == VARYING_SLOT_POS)
-               return 0;
-       if (slot == VARYING_SLOT_PSIZ)
-               return 1;
-       if (slot == VARYING_SLOT_CLIP_DIST0)
-               return 2;
-       if (slot == VARYING_SLOT_CLIP_DIST1)
-               return 3;
-       /* 3 is reserved for clip dist as well */
-       if (slot >= VARYING_SLOT_VAR0 && slot <= VARYING_SLOT_VAR31)
-               return 4 + (slot - VARYING_SLOT_VAR0);
-       unreachable("illegal slot in get unique index\n");
-}
-
 VkResult radv_CreateShaderModule(
        VkDevice                                    _device,
        const VkShaderModuleCreateInfo*             pCreateInfo,
@@ -171,12 +169,15 @@ VkResult radv_CreateShaderModule(
        assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
        assert(pCreateInfo->flags == 0);
 
-       module = vk_alloc2(&device->alloc, pAllocator,
+       module = vk_alloc2(&device->vk.alloc, pAllocator,
                             sizeof(*module) + pCreateInfo->codeSize, 8,
                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (module == NULL)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+       vk_object_base_init(&device->vk, &module->base,
+                           VK_OBJECT_TYPE_SHADER_MODULE);
+
        module->nir = NULL;
        module->size = pCreateInfo->codeSize;
        memcpy(module->data, pCreateInfo->pCode, module->size);
@@ -199,7 +200,8 @@ void radv_DestroyShaderModule(
        if (!module)
                return;
 
-       vk_free2(&device->alloc, pAllocator, module);
+       vk_object_base_finish(&module->base);
+       vk_free2(&device->vk.alloc, pAllocator, module);
 }
 
 void
@@ -233,7 +235,8 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                NIR_PASS(progress, shader, nir_opt_copy_prop_vars);
                NIR_PASS(progress, shader, nir_opt_dead_write_vars);
                NIR_PASS(progress, shader, nir_remove_dead_variables,
-                        nir_var_function_temp);
+                        nir_var_function_temp | nir_var_shader_in | nir_var_shader_out,
+                        NULL);
 
                 NIR_PASS_V(shader, nir_lower_alu_to_scalar, NULL, NULL);
                 NIR_PASS_V(shader, nir_lower_phis_to_scalar);
@@ -275,16 +278,79 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                 }
 
                 NIR_PASS(progress, shader, nir_opt_undef);
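+                /* Shrinking vectors inside the loop lets the surrounding
+                 * passes clean up the newly narrowed values on the next
+                 * iteration. */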
+                NIR_PASS(progress, shader, nir_opt_shrink_vectors);
                 if (shader->options->max_unroll_iterations) {
                         NIR_PASS(progress, shader, nir_opt_loop_unroll, 0);
                 }
         } while (progress && !optimize_conservatively);
 
        NIR_PASS(progress, shader, nir_opt_conditional_discard);
-        NIR_PASS(progress, shader, nir_opt_shrink_load);
         NIR_PASS(progress, shader, nir_opt_move, nir_move_load_ubo);
 }
 
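+/* Size/alignment callback for lowering shared-memory variables to explicit
+ * offsets: booleans are stored as 4 bytes, everything else at its natural
+ * component size.
+ */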
+static void
+shared_var_info(const struct glsl_type *type, unsigned *size, unsigned *align)
+{
+       assert(glsl_type_is_vector_or_scalar(type));
+
+       uint32_t comp_size = glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
+       unsigned length = glsl_get_vector_elements(type);
+	*size = comp_size * length;
+       *align = comp_size;
+}
+
+struct radv_shader_debug_data {
+       struct radv_device *device;
+       const struct radv_shader_module *module;
+};
+
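+/* Forward spirv_to_nir debug messages to the VK_EXT_debug_report callbacks. */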
+static void radv_spirv_nir_debug(void *private_data,
+                                enum nir_spirv_debug_level level,
+                                size_t spirv_offset,
+                                const char *message)
+{
+       struct radv_shader_debug_data *debug_data = private_data;
+       struct radv_instance *instance = debug_data->device->instance;
+
+       static const VkDebugReportFlagsEXT vk_flags[] = {
+               [NIR_SPIRV_DEBUG_LEVEL_INFO] = VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
+               [NIR_SPIRV_DEBUG_LEVEL_WARNING] = VK_DEBUG_REPORT_WARNING_BIT_EXT,
+               [NIR_SPIRV_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+       };
+       char buffer[256];
+
+       snprintf(buffer, sizeof(buffer), "SPIR-V offset %lu: %s",
+                (unsigned long)spirv_offset, message);
+
+       vk_debug_report(&instance->debug_report_callbacks,
+                       vk_flags[level],
+                       VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+                       (uint64_t)(uintptr_t)debug_data->module,
+                       0, 0, "radv", buffer);
+}
+
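+/* Forward compiler backend messages to the VK_EXT_debug_report callbacks. */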
+static void radv_compiler_debug(void *private_data,
+                               enum radv_compiler_debug_level level,
+                               const char *message)
+{
+       struct radv_shader_debug_data *debug_data = private_data;
+       struct radv_instance *instance = debug_data->device->instance;
+
+       static const VkDebugReportFlagsEXT vk_flags[] = {
+               [RADV_COMPILER_DEBUG_LEVEL_PERFWARN] = VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
+               [RADV_COMPILER_DEBUG_LEVEL_ERROR] = VK_DEBUG_REPORT_ERROR_BIT_EXT,
+       };
+
+       /* VK_DEBUG_REPORT_DEBUG_BIT_EXT specifies diagnostic information
+        * from the implementation and layers.
+        */
+       vk_debug_report(&instance->debug_report_callbacks,
+                       vk_flags[level] | VK_DEBUG_REPORT_DEBUG_BIT_EXT,
+                       VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT,
+                       (uint64_t)(uintptr_t)debug_data->module,
+                       0, 0, "radv", message);
+}
+
 nir_shader *
 radv_shader_compile_to_nir(struct radv_device *device,
                           struct radv_shader_module *module,
@@ -293,11 +359,12 @@ radv_shader_compile_to_nir(struct radv_device *device,
                           const VkSpecializationInfo *spec_info,
                           const VkPipelineCreateFlags flags,
                           const struct radv_pipeline_layout *layout,
-                          bool use_aco)
+                          unsigned subgroup_size, unsigned ballot_bit_size)
 {
        nir_shader *nir;
-       const nir_shader_compiler_options *nir_options = use_aco ? &nir_options_aco :
-                                                                  &nir_options_llvm;
+       const nir_shader_compiler_options *nir_options =
+               radv_use_llvm_for_stage(device, stage) ? &nir_options_llvm : &nir_options_aco;
+
        if (module->nir) {
                /* Some things such as our meta clear/blit code will give us a NIR
                 * shader directly.  In that case, we just ignore the SPIR-V entirely
@@ -318,26 +385,48 @@ radv_shader_compile_to_nir(struct radv_device *device,
                struct nir_spirv_specialization *spec_entries = NULL;
                if (spec_info && spec_info->mapEntryCount > 0) {
                        num_spec_entries = spec_info->mapEntryCount;
-                       spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+                       spec_entries = calloc(num_spec_entries, sizeof(*spec_entries));
                        for (uint32_t i = 0; i < num_spec_entries; i++) {
                                VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
                                const void *data = spec_info->pData + entry.offset;
                                assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
 
                                spec_entries[i].id = spec_info->pMapEntries[i].constantID;
-                               if (spec_info->dataSize == 8)
-                                       spec_entries[i].data64 = *(const uint64_t *)data;
-                               else
-                                       spec_entries[i].data32 = *(const uint32_t *)data;
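+                               /* Each map entry carries its own size; copy the
+                                * raw value at exactly that width. */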
+                               switch (entry.size) {
+                               case 8:
+                                       spec_entries[i].value.u64 = *(const uint64_t *)data;
+                                       break;
+                               case 4:
+                                       spec_entries[i].value.u32 = *(const uint32_t *)data;
+                                       break;
+                               case 2:
+                                       spec_entries[i].value.u16 = *(const uint16_t *)data;
+                                       break;
+                               case 1:
+                                       spec_entries[i].value.u8 = *(const uint8_t *)data;
+                                       break;
+                               default:
+                                       assert(!"Invalid spec constant size");
+                                       break;
+                               }
                        }
                }
+
+               struct radv_shader_debug_data spirv_debug_data = {
+                       .device = device,
+                       .module = module,
+               };
                const struct spirv_to_nir_options spirv_options = {
                        .lower_ubo_ssbo_access_to_offsets = true,
                        .caps = {
+                               .amd_fragment_mask = true,
                                .amd_gcn_shader = true,
-                               .amd_shader_ballot = device->physical_device->use_shader_ballot,
+                               .amd_image_gather_bias_lod = true,
+                               .amd_image_read_write_lod = true,
+                               .amd_shader_ballot = true,
+                               .amd_shader_explicit_vertex_parameter = true,
                                .amd_trinary_minmax = true,
-                               .demote_to_helper_invocation = device->physical_device->use_aco,
+                               .demote_to_helper_invocation = true,
                                .derivative_group = true,
                                .descriptor_array_dynamic_indexing = true,
                                .descriptor_array_non_uniform_indexing = true,
@@ -345,15 +434,18 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .device_group = true,
                                .draw_parameters = true,
                                .float_controls = true,
-                               .float16 = !device->physical_device->use_aco,
+                               .float16 = device->physical_device->rad_info.has_packed_math_16bit,
+                               .float32_atomic_add = true,
                                .float64 = true,
                                .geometry_streams = true,
+                               .image_ms_array = true,
                                .image_read_without_format = true,
                                .image_write_without_format = true,
-                               .int8 = !device->physical_device->use_aco,
-                               .int16 = !device->physical_device->use_aco,
+                               .int8 = true,
+                               .int16 = true,
                                .int64 = true,
                                .int64_atomics = true,
+                               .min_lod = true,
                                .multiview = true,
                                .physical_storage_buffer_address = true,
                                .post_depth_coverage = true,
@@ -361,8 +453,8 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .shader_clock = true,
                                .shader_viewport_index_layer = true,
                                .stencil_export = true,
-                               .storage_8bit = !device->physical_device->use_aco,
-                               .storage_16bit = !device->physical_device->use_aco,
+                               .storage_8bit = true,
+                               .storage_16bit = true,
                                .storage_image_ms = true,
                                .subgroup_arithmetic = true,
                                .subgroup_ballot = true,
@@ -373,6 +465,8 @@ radv_shader_compile_to_nir(struct radv_device *device,
                                .tessellation = true,
                                .transform_feedback = true,
                                .variable_pointers = true,
+                               .vk_memory_model = true,
+                               .vk_memory_model_device_scope = true,
                        },
                        .ubo_addr_format = nir_address_format_32bit_index_offset,
                        .ssbo_addr_format = nir_address_format_32bit_index_offset,
@@ -380,6 +474,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
                        .push_const_addr_format = nir_address_format_logical,
                        .shared_addr_format = nir_address_format_32bit_offset,
                        .frag_coord_is_sysval = true,
+                       .debug = {
+                               .func = radv_spirv_nir_debug,
+                               .private_data = &spirv_debug_data,
+                       },
                };
                nir = spirv_to_nir(spirv, module->size / 4,
                                   spec_entries, num_spec_entries,
@@ -394,9 +492,10 @@ radv_shader_compile_to_nir(struct radv_device *device,
                 * inline functions.  That way they get properly initialized at the top
                 * of the function and not at the top of its caller.
                 */
-               NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+               NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
                NIR_PASS_V(nir, nir_lower_returns);
                NIR_PASS_V(nir, nir_inline_functions);
+               NIR_PASS_V(nir, nir_copy_prop);
                NIR_PASS_V(nir, nir_opt_deref);
 
                /* Pick off the single entrypoint that we want */
@@ -411,12 +510,12 @@ radv_shader_compile_to_nir(struct radv_device *device,
                /* Make sure we lower constant initializers on output variables so that
                 * nir_remove_dead_variables below sees the corresponding stores
                 */
-               NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_shader_out);
+               NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_shader_out);
 
                /* Now that we've deleted all but the main function, we can go ahead and
                 * lower the rest of the constant initializers.
                 */
-               NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);
+               NIR_PASS_V(nir, nir_lower_variable_initializers, ~0);
 
                /* Split member structs.  We do this before lower_io_to_temporaries so that
                 * it doesn't lower system values to temporaries by accident.
@@ -424,19 +523,40 @@ radv_shader_compile_to_nir(struct radv_device *device,
                NIR_PASS_V(nir, nir_split_var_copies);
                NIR_PASS_V(nir, nir_split_per_member_structs);
 
-               if (nir->info.stage == MESA_SHADER_FRAGMENT && use_aco)
+               if (nir->info.stage == MESA_SHADER_FRAGMENT &&
+                   !radv_use_llvm_for_stage(device, nir->info.stage))
                         NIR_PASS_V(nir, nir_lower_io_to_vector, nir_var_shader_out);
                if (nir->info.stage == MESA_SHADER_FRAGMENT)
-                       NIR_PASS_V(nir, nir_lower_input_attachments, true);
+                       NIR_PASS_V(nir, nir_lower_input_attachments,
+                                  &(nir_input_attachment_options) {
+                                       .use_fragcoord_sysval = true,
+                                       .use_layer_id_sysval = false,
+                                  });
 
                NIR_PASS_V(nir, nir_remove_dead_variables,
-                          nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared);
+                          nir_var_shader_in | nir_var_shader_out | nir_var_system_value | nir_var_mem_shared,
+                          NULL);
 
                NIR_PASS_V(nir, nir_propagate_invariant);
 
                NIR_PASS_V(nir, nir_lower_system_values);
                NIR_PASS_V(nir, nir_lower_clip_cull_distance_arrays);
-               NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
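+               /* Debug option: lowering discards to demotes keeps helper
+                * invocations alive, which helps track down derivative
+                * issues. */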
+               if (device->instance->debug_flags & RADV_DEBUG_DISCARD_TO_DEMOTE)
+                       NIR_PASS_V(nir, nir_lower_discard_to_demote);
+
+               nir_lower_doubles_options lower_doubles =
+                       nir->options->lower_doubles_options;
+
+               if (device->physical_device->rad_info.chip_class == GFX6) {
+                       /* GFX6 doesn't support v_floor_f64, and the precision
+                        * of v_fract_f64 (which is used to implement 64-bit
+                        * floor) is less than what Vulkan requires.
+                        */
+                       lower_doubles |= nir_lower_dfloor;
+               }
+
+               NIR_PASS_V(nir, nir_lower_doubles, NULL, lower_doubles);
        }
 
        /* Vulkan uses the separate-shader linking model */
@@ -444,6 +564,9 @@ radv_shader_compile_to_nir(struct radv_device *device,
 
        nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));
 
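+       /* Rewrite emit_vertex/end_primitive into the counter-based variants
+        * expected by the backends. */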
+       if (nir->info.stage == MESA_SHADER_GEOMETRY)
+               nir_lower_gs_intrinsics(nir, true);
+
        static const nir_lower_tex_options tex_options = {
          .lower_txp = ~0,
          .lower_tg4_offsets = true,
@@ -466,15 +589,19 @@ radv_shader_compile_to_nir(struct radv_device *device,
        nir_split_var_copies(nir);
 
        nir_lower_global_vars_to_local(nir);
-       nir_remove_dead_variables(nir, nir_var_function_temp);
+       nir_remove_dead_variables(nir, nir_var_function_temp, NULL);
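+       /* GFX6-7 lack ds_bpermute, so dynamic quad broadcasts are lowered to
+        * a chain of constant broadcasts there. */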
+       bool gfx7minus = device->physical_device->rad_info.chip_class <= GFX7;
        nir_lower_subgroups(nir, &(struct nir_lower_subgroups_options) {
-                       .subgroup_size = 64,
-                       .ballot_bit_size = 64,
+                       .subgroup_size = subgroup_size,
+                       .ballot_bit_size = ballot_bit_size,
                        .lower_to_scalar = 1,
                        .lower_subgroup_masks = 1,
                        .lower_shuffle = 1,
                        .lower_shuffle_to_32bit = 1,
                        .lower_vote_eq_to_ballot = 1,
+                       .lower_quad_broadcast_dynamic = 1,
+                       .lower_quad_broadcast_dynamic_to_const = gfx7minus,
+                       .lower_shuffle_to_swizzle_amd = 1,
                });
 
        nir_lower_load_const_to_scalar(nir);
@@ -482,11 +609,23 @@ radv_shader_compile_to_nir(struct radv_device *device,
        if (!(flags & VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT))
                radv_optimize_nir(nir, false, true);
 
+       /* Call radv_nir_lower_ycbcr_textures() late, because there may still
+        * be tex instructions with undef texture/sampler before the first
+        * optimization pass. */
+       NIR_PASS_V(nir, radv_nir_lower_ycbcr_textures, layout);
+
        /* We call nir_lower_var_copies() after the first radv_optimize_nir()
         * to remove any copies introduced by nir_opt_find_array_copies().
         */
        nir_lower_var_copies(nir);
 
+       /* Lower deref operations for compute shared memory. */
+       if (nir->info.stage == MESA_SHADER_COMPUTE) {
+               NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
+                          nir_var_mem_shared, shared_var_info);
+               NIR_PASS_V(nir, nir_lower_explicit_io,
+                          nir_var_mem_shared, nir_address_format_32bit_offset);
+       }
+
        /* Lower large variables that are always constant with load_constant
         * intrinsics, which get turned into PC-relative loads from a data
         * section next to the shader.
@@ -514,27 +653,24 @@ type_size_vec4(const struct glsl_type *type, bool bindless)
 static nir_variable *
 find_layer_in_var(nir_shader *nir)
 {
-       nir_foreach_variable(var, &nir->inputs) {
-               if (var->data.location == VARYING_SLOT_LAYER) {
-                       return var;
-               }
-       }
-
        nir_variable *var =
-               nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
+               nir_find_variable_with_location(nir, nir_var_shader_in, VARYING_SLOT_LAYER);
+       if (var != NULL)
+               return var;
+
+       var = nir_variable_create(nir, nir_var_shader_in, glsl_int_type(), "layer id");
        var->data.location = VARYING_SLOT_LAYER;
        var->data.interpolation = INTERP_MODE_FLAT;
        return var;
 }
 
 /* We use layered rendering to implement multiview, which means we need to map
- * view_index to gl_Layer. The attachment lowering also uses needs to know the
- * layer so that it can sample from the correct layer. The code generates a
- * load from the layer_id sysval, but since we don't have a way to get at this
- * information from the fragment shader, we also need to lower this to the
- * gl_Layer varying.  This pass lowers both to a varying load from the LAYER
- * slot, before lowering io, so that nir_assign_var_locations() will give the
- * LAYER varying the correct driver_location.
+ * view_index to gl_Layer. Since we don't have a way to get at this
+ * information from the fragment shader, this pass lowers view_index to a
+ * varying load from the LAYER slot, before lowering io, so that
+ * nir_assign_var_locations() will give the LAYER varying the correct
+ * driver_location.
  */
 
 static bool
@@ -552,8 +688,7 @@ lower_view_index(nir_shader *nir)
                                continue;
 
                        nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);
-                       if (load->intrinsic != nir_intrinsic_load_view_index &&
-                           load->intrinsic != nir_intrinsic_load_layer_id)
+                       if (load->intrinsic != nir_intrinsic_load_view_index)
                                continue;
 
                        if (!layer)
@@ -576,7 +711,7 @@ void
 radv_lower_fs_io(nir_shader *nir)
 {
        NIR_PASS_V(nir, lower_view_index);
-       nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
+       nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
                                    MESA_SHADER_FRAGMENT);
 
        NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
@@ -588,7 +723,7 @@ radv_lower_fs_io(nir_shader *nir)
 }
 
 
-void *
+static void *
 radv_alloc_shader_memory(struct radv_device *device,
                         struct radv_shader_variant *shader)
 {
@@ -605,7 +740,7 @@ radv_alloc_shader_memory(struct radv_device *device,
                        }
                        offset = align_u64(s->bo_offset + s->code_size, 256);
                }
-               if (slab->size - offset >= shader->code_size) {
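+               /* "offset" may already point past the end of the slab; check
+                * that first so the unsigned subtraction below cannot wrap. */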
+               if (offset <= slab->size && slab->size - offset >= shader->code_size) {
                        shader->bo = slab->bo;
                        shader->bo_offset = offset;
                        list_addtail(&shader->slab_list, &slab->shaders);
@@ -617,14 +752,25 @@ radv_alloc_shader_memory(struct radv_device *device,
        mtx_unlock(&device->shader_slab_mutex);
        struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
 
-       slab->size = 256 * 1024;
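+       /* Grow the slab if a single shader is larger than the default size. */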
+       slab->size = MAX2(256 * 1024, shader->code_size);
        slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
                                             RADEON_DOMAIN_VRAM,
                                             RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                             (device->physical_device->rad_info.cpdma_prefetch_writes_memory ?
                                                     0 : RADEON_FLAG_READ_ONLY),
                                             RADV_BO_PRIORITY_SHADER);
+       if (!slab->bo) {
+               free(slab);
+               return NULL;
+       }
+
        slab->ptr = (char*)device->ws->buffer_map(slab->bo);
+       if (!slab->ptr) {
+               device->ws->buffer_destroy(slab->bo);
+               free(slab);
+               return NULL;
+       }
+
        list_inithead(&slab->shaders);
 
        mtx_lock(&device->shader_slab_mutex);
@@ -685,20 +831,6 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
        config_out->num_sgprs = num_sgprs;
        config_out->num_shared_vgprs = num_shared_vgprs;
 
-       /* Enable 64-bit and 16-bit denormals, because there is no performance
-        * cost.
-        *
-        * If denormals are enabled, all floating-point output modifiers are
-        * ignored.
-        *
-        * Don't enable denormals for 32-bit floats, because:
-        * - Floating-point output modifiers would be ignored by the hw.
-        * - Some opcodes don't support denormals, such as v_mad_f32. We would
-        *   have to stop using those.
-        * - GFX6 & GFX7 would be very slow.
-        */
-       config_out->float_mode |= V_00B028_FP_64_DENORMS;
-
        config_out->rsrc2 = S_00B12C_USER_SGPR(info->num_user_sgprs) |
                            S_00B12C_SCRATCH_EN(scratch_enabled);
 
@@ -749,8 +881,10 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
                         */
                        if (pdevice->rad_info.chip_class >= GFX10) {
                                vgpr_comp_cnt = info->vs.needs_instance_id ? 3 : 1;
+                               config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX10(info->tcs.num_lds_blocks);
                        } else {
                                vgpr_comp_cnt = info->vs.needs_instance_id ? 2 : 1;
+                               config_out->rsrc2 |= S_00B42C_LDS_SIZE_GFX9(info->tcs.num_lds_blocks);
                        }
                } else {
                        config_out->rsrc2 |= S_00B12C_OC_LDS_EN(1);
@@ -789,8 +923,8 @@ static void radv_postprocess_config(const struct radv_physical_device *pdevice,
                        }
 
                        config_out->rsrc1 |= S_00B128_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
-                       config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
                }
+               config_out->rsrc2 |= S_00B12C_SHARED_VGPR_CNT(num_shared_vgpr_blocks);
                break;
        case MESA_SHADER_FRAGMENT:
                config_out->rsrc1 |= S_00B028_MEM_ORDERED(pdevice->rad_info.chip_class >= GFX10);
@@ -950,7 +1084,8 @@ radv_shader_variant_create(struct radv_device *device,
                        return NULL;
                }
 
-               if (!ac_rtld_read_config(&rtld_binary, &config)) {
+               if (!ac_rtld_read_config(&device->physical_device->rad_info,
+                                        &rtld_binary, &config)) {
                        ac_rtld_close(&rtld_binary);
                        free(variant);
                        return NULL;
@@ -974,15 +1109,14 @@ radv_shader_variant_create(struct radv_device *device,
        radv_postprocess_config(device->physical_device, &config, &binary->info,
                                binary->stage, &variant->config);
 
-       if (radv_device_use_secure_compile(device->instance)) {
+       void *dest_ptr = radv_alloc_shader_memory(device, variant);
+       if (!dest_ptr) {
                if (binary->type == RADV_BINARY_TYPE_RTLD)
                        ac_rtld_close(&rtld_binary);
-
-               return variant;
+               free(variant);
+               return NULL;
        }
 
-       void *dest_ptr = radv_alloc_shader_memory(device, variant);
-
        if (binary->type == RADV_BINARY_TYPE_RTLD) {
                struct radv_shader_binary_rtld* bin = (struct radv_shader_binary_rtld *)binary;
                struct ac_rtld_upload_info info = {
@@ -1016,15 +1150,20 @@ radv_shader_variant_create(struct radv_device *device,
                ac_rtld_close(&rtld_binary);
        } else {
                struct radv_shader_binary_legacy* bin = (struct radv_shader_binary_legacy *)binary;
-               memcpy(dest_ptr, bin->data, bin->code_size);
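+               /* Legacy binaries are laid out as statistics, code, IR string,
+                * then disassembly; skip the statistics to reach the code. */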
+               memcpy(dest_ptr, bin->data + bin->stats_size, bin->code_size);
 
                /* Add end-of-code markers for the UMR disassembler. */
                uint32_t *ptr32 = (uint32_t *)dest_ptr + bin->code_size / 4;
                for (unsigned i = 0; i < DEBUGGER_NUM_MARKERS; i++)
                        ptr32[i] = DEBUGGER_END_OF_CODE_MARKER;
 
-               variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->code_size)) : NULL;
-               variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->code_size + bin->ir_size)) : NULL;
+               variant->ir_string = bin->ir_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size)) : NULL;
+               variant->disasm_string = bin->disasm_size ? strdup((const char*)(bin->data + bin->stats_size + bin->code_size + bin->ir_size)) : NULL;
+
+               if (bin->stats_size) {
+                       variant->statistics = calloc(bin->stats_size, 1);
+                       memcpy(variant->statistics, bin->data, bin->stats_size);
+               }
        }
        return variant;
 }
@@ -1062,83 +1201,69 @@ shader_variant_compile(struct radv_device *device,
                       struct radv_nir_compiler_options *options,
                       bool gs_copy_shader,
                       bool keep_shader_info,
-                      bool use_aco,
+                      bool keep_statistic_info,
                       struct radv_shader_binary **binary_out)
 {
        enum radeon_family chip_family = device->physical_device->rad_info.family;
        struct radv_shader_binary *binary = NULL;
 
+       struct radv_shader_debug_data debug_data = {
+               .device = device,
+               .module = module,
+       };
+
        options->family = chip_family;
        options->chip_class = device->physical_device->rad_info.chip_class;
        options->dump_shader = radv_can_dump_shader(device, module, gs_copy_shader);
        options->dump_preoptir = options->dump_shader &&
                                 device->instance->debug_flags & RADV_DEBUG_PREOPTIR;
        options->record_ir = keep_shader_info;
+       options->record_stats = keep_statistic_info;
        options->check_ir = device->instance->debug_flags & RADV_DEBUG_CHECKIR;
        options->tess_offchip_block_dw_size = device->tess_offchip_block_dw_size;
        options->address32_hi = device->physical_device->rad_info.address32_hi;
        options->has_ls_vgpr_init_bug = device->physical_device->rad_info.has_ls_vgpr_init_bug;
        options->use_ngg_streamout = device->physical_device->use_ngg_streamout;
-
-       if ((stage == MESA_SHADER_GEOMETRY && !options->key.vs_common_out.as_ngg) ||
-           gs_copy_shader)
-               options->wave_size = 64;
-       else if (stage == MESA_SHADER_COMPUTE)
-               options->wave_size = device->physical_device->cs_wave_size;
-       else if (stage == MESA_SHADER_FRAGMENT)
-               options->wave_size = device->physical_device->ps_wave_size;
-       else
-               options->wave_size = device->physical_device->ge_wave_size;
-
-       if (!use_aco || options->dump_shader || options->record_ir)
+       options->enable_mrt_output_nan_fixup = device->instance->enable_mrt_output_nan_fixup;
+       options->debug.func = radv_compiler_debug;
+       options->debug.private_data = &debug_data;
+
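+       /* Declare the shader ABI (user SGPRs and input VGPRs) up front; both
+        * the ACO and LLVM paths compile against these args. */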
+       struct radv_shader_args args = {};
+       args.options = options;
+       args.shader_info = info;
+       args.is_gs_copy_shader = gs_copy_shader;
+       radv_declare_shader_args(&args,
+                                gs_copy_shader ? MESA_SHADER_VERTEX
+                                               : shaders[shader_count - 1]->info.stage,
+                                shader_count >= 2,
+                                shader_count >= 2 ? shaders[shader_count - 2]->info.stage
+                                                  : MESA_SHADER_VERTEX);
+
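+       /* Even with ACO, LLVM is still needed to disassemble the binary when
+        * dumping shaders or recording IR. */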
+       if (radv_use_llvm_for_stage(device, stage) ||
+           options->dump_shader || options->record_ir)
                ac_init_llvm_once();
 
-       if (use_aco) {
-               aco_compile_shader(shader_count, shaders, &binary, info, options);
-               binary->info = *info;
+       if (radv_use_llvm_for_stage(device, stage)) {
+               llvm_compile_shader(device, shader_count, shaders, &binary, &args);
        } else {
-               enum ac_target_machine_options tm_options = 0;
-               struct ac_llvm_compiler ac_llvm;
-               bool thread_compiler;
-
-               if (options->supports_spill)
-                       tm_options |= AC_TM_SUPPORTS_SPILL;
-               if (device->instance->perftest_flags & RADV_PERFTEST_SISCHED)
-                       tm_options |= AC_TM_SISCHED;
-               if (options->check_ir)
-                       tm_options |= AC_TM_CHECK_IR;
-               if (device->instance->debug_flags & RADV_DEBUG_NO_LOAD_STORE_OPT)
-                       tm_options |= AC_TM_NO_LOAD_STORE_OPT;
-
-               thread_compiler = !(device->instance->debug_flags & RADV_DEBUG_NOTHREADLLVM);
-               radv_init_llvm_compiler(&ac_llvm,
-                                       thread_compiler,
-                                       chip_family, tm_options,
-                                       options->wave_size);
-
-               if (gs_copy_shader) {
-                       assert(shader_count == 1);
-                       radv_compile_gs_copy_shader(&ac_llvm, *shaders, &binary,
-                                                   info, options);
-               } else {
-                       radv_compile_nir_shader(&ac_llvm, &binary, info,
-                                               shaders, shader_count, options);
-               }
-
-               binary->info = *info;
-               radv_destroy_llvm_compiler(&ac_llvm, thread_compiler);
+               aco_compile_shader(shader_count, shaders, &binary, &args);
        }
 
+       binary->info = *info;
+
        struct radv_shader_variant *variant = radv_shader_variant_create(device, binary,
                                                                         keep_shader_info);
        if (!variant) {
                free(binary);
                return NULL;
        }
-       variant->aco_used = use_aco;
 
        if (options->dump_shader) {
-               fprintf(stderr, "disasm:\n%s\n", variant->disasm_string);
+               fprintf(stderr, "%s", radv_get_shader_name(info, shaders[0]->info.stage));
+               for (int i = 1; i < shader_count; ++i)
+                       fprintf(stderr, " + %s", radv_get_shader_name(info, shaders[i]->info.stage));
+
+               fprintf(stderr, "\ndisasm:\n%s\n", variant->disasm_string);
        }
 
 
@@ -1173,22 +1298,21 @@ radv_shader_variant_compile(struct radv_device *device,
                           struct radv_pipeline_layout *layout,
                           const struct radv_shader_variant_key *key,
                           struct radv_shader_info *info,
-                          bool keep_shader_info,
-                          bool use_aco,
+                          bool keep_shader_info, bool keep_statistic_info,
                           struct radv_shader_binary **binary_out)
 {
+       gl_shader_stage stage = shaders[shader_count - 1]->info.stage;
        struct radv_nir_compiler_options options = {0};
 
        options.layout = layout;
        if (key)
                options.key = *key;
 
-       options.unsafe_math = !!(device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH);
-       options.supports_spill = true;
+       options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
        options.robust_buffer_access = device->robust_buffer_access;
 
-       return shader_variant_compile(device, module, shaders, shader_count, shaders[shader_count - 1]->info.stage, info,
-                                    &options, false, keep_shader_info, use_aco, binary_out);
+       return shader_variant_compile(device, module, shaders, shader_count, stage, info,
+                                    &options, false, keep_shader_info, keep_statistic_info, binary_out);
 }
 
 struct radv_shader_variant *
@@ -1196,15 +1320,17 @@ radv_create_gs_copy_shader(struct radv_device *device,
                           struct nir_shader *shader,
                           struct radv_shader_info *info,
                           struct radv_shader_binary **binary_out,
-                          bool keep_shader_info,
+                          bool keep_shader_info, bool keep_statistic_info,
                           bool multiview)
 {
        struct radv_nir_compiler_options options = {0};
+       gl_shader_stage stage = MESA_SHADER_VERTEX;
 
+       options.explicit_scratch_args = !radv_use_llvm_for_stage(device, stage);
        options.key.has_multiview_view_index = multiview;
 
-       return shader_variant_compile(device, NULL, &shader, 1, MESA_SHADER_VERTEX,
-                                     info, &options, true, keep_shader_info, false, binary_out);
+       return shader_variant_compile(device, NULL, &shader, 1, stage,
+                                     info, &options, true, keep_shader_info, keep_statistic_info, binary_out);
 }
 
 void
@@ -1222,6 +1348,7 @@ radv_shader_variant_destroy(struct radv_device *device,
        free(variant->nir_string);
        free(variant->disasm_string);
        free(variant->ir_string);
+       free(variant->statistics);
        free(variant);
 }
 
@@ -1316,14 +1443,12 @@ radv_get_max_waves(struct radv_device *device,
                unsigned vgprs = align(conf->num_vgprs, wave_size == 32 ? 8 : 4);
                max_simd_waves =
                        MIN2(max_simd_waves,
-                            RADV_NUM_PHYSICAL_VGPRS / vgprs);
+                            device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd / vgprs);
        }
 
-       /* LDS is 64KB per CU (4 SIMDs), divided into 16KB blocks per SIMD
-        * that PS can use.
-        */
+       unsigned max_lds_per_simd =
+               device->physical_device->rad_info.lds_size_per_workgroup /
+               device->physical_device->rad_info.num_simd_per_compute_unit;
        if (lds_per_wave)
-               max_simd_waves = MIN2(max_simd_waves, 16384 / lds_per_wave);
+               max_simd_waves = MIN2(max_simd_waves, max_lds_per_simd / lds_per_wave);
 
        return max_simd_waves;
 }
@@ -1353,13 +1478,23 @@ generate_shader_stats(struct radv_device *device,
                                   "Code Size: %d bytes\n"
                                   "LDS: %d blocks\n"
                                   "Scratch: %d bytes per wave\n"
-                                  "Max Waves: %d\n"
-                                  "********************\n\n\n",
+                                  "Max Waves: %d\n",
                                   conf->num_sgprs, conf->num_vgprs,
                                   conf->spilled_sgprs, conf->spilled_vgprs,
                                   variant->info.private_mem_vgprs, variant->exec_size,
                                   conf->lds_size, conf->scratch_bytes_per_wave,
                                   max_simd_waves);
+
+       if (variant->statistics) {
+               _mesa_string_buffer_printf(buf, "*** COMPILER STATS ***\n");
+               for (unsigned i = 0; i < variant->statistics->count; i++) {
+                       struct radv_compiler_statistic_info *info = &variant->statistics->infos[i];
+                       uint32_t value = variant->statistics->values[i];
+                       _mesa_string_buffer_printf(buf, "%s: %u\n", info->name, value);
+               }
+       }
+
+       _mesa_string_buffer_printf(buf, "********************\n\n\n");
 }
 
 void
@@ -1408,7 +1543,7 @@ radv_GetShaderInfoAMD(VkDevice _device,
 
                        VkShaderStatisticsInfoAMD statistics = {};
                        statistics.shaderStageMask = shaderStage;
-                       statistics.numPhysicalVgprs = RADV_NUM_PHYSICAL_VGPRS;
+                       statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
                        statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
                        statistics.numAvailableSgprs = statistics.numPhysicalSgprs;