radeonsi: prepare for driver-specific driconf options
[mesa.git] / src / gallium / drivers / radeonsi / si_pipe.c
index 47426b41da6202df2aea0554c4f2639221968d22..d276b885c112591d91d508f2b63a6e670725924f 100644
 #include "sid.h"
 
 #include "radeon/radeon_uvd.h"
+#include "util/hash_table.h"
 #include "util/u_memory.h"
 #include "util/u_suballoc.h"
 #include "util/u_tests.h"
 #include "vl/vl_decoder.h"
 #include "../ddebug/dd_util.h"
 
+#include "compiler/nir/nir.h"
+
 /*
  * pipe_context
  */
@@ -63,6 +66,7 @@ static void si_destroy_context(struct pipe_context *context)
        free(sctx->border_color_table);
        r600_resource_reference(&sctx->scratch_buffer, NULL);
        r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
+       r600_resource_reference(&sctx->wait_mem_scratch, NULL);
 
        si_pm4_free_state(sctx, sctx->init_config, ~0);
        if (sctx->init_config_gs_rings)
@@ -76,10 +80,10 @@ static void si_destroy_context(struct pipe_context *context)
                sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush);
        if (sctx->custom_blend_resolve)
                sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
-       if (sctx->custom_blend_decompress)
-               sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_decompress);
-       if (sctx->custom_blend_fastclear)
-               sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fastclear);
+       if (sctx->custom_blend_fmask_decompress)
+               sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fmask_decompress);
+       if (sctx->custom_blend_eliminate_fastclear)
+               sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_eliminate_fastclear);
        if (sctx->custom_blend_dcc_decompress)
                sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_dcc_decompress);
 
@@ -94,6 +98,17 @@ static void si_destroy_context(struct pipe_context *context)
        r600_resource_reference(&sctx->last_trace_buf, NULL);
        radeon_clear_saved_cs(&sctx->last_gfx);
 
+       pb_slabs_deinit(&sctx->bindless_descriptor_slabs);
+       util_dynarray_fini(&sctx->bindless_descriptors);
+
+       _mesa_hash_table_destroy(sctx->tex_handles, NULL);
+       _mesa_hash_table_destroy(sctx->img_handles, NULL);
+
+       util_dynarray_fini(&sctx->resident_tex_handles);
+       util_dynarray_fini(&sctx->resident_img_handles);
+       util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
+       util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
+       util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
        FREE(sctx);
 }
 
@@ -128,11 +143,12 @@ si_create_llvm_target_machine(struct si_screen *sscreen)
        char features[256];
 
        snprintf(features, sizeof(features),
-                "+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s",
+                "+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s%s",
                 sscreen->b.chip_class >= GFX9 ? ",+xnack" : ",-xnack",
+                sscreen->llvm_has_working_vgpr_indexing ? "" : ",-promote-alloca",
                 sscreen->b.debug_flags & DBG_SI_SCHED ? ",+si-scheduler" : "");
 
-       return LLVMCreateTargetMachine(si_llvm_get_amdgpu_target(triple), triple,
+       return LLVMCreateTargetMachine(ac_get_llvm_target(triple), triple,
                                       r600_get_llvm_processor_name(sscreen->b.family),
                                       features,
                                       LLVMCodeGenLevelDefault,
@@ -151,9 +167,6 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
        if (!sctx)
                return NULL;
 
-       if (sscreen->b.debug_flags & DBG_CHECK_VM)
-               flags |= PIPE_CONTEXT_DEBUG;
-
        if (flags & PIPE_CONTEXT_DEBUG)
                sscreen->record_llvm_ir = true; /* racy but not critical */
 
@@ -192,10 +205,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
            sscreen->b.chip_class != SI &&
            /* These can't use CE due to a power gating bug in the kernel. */
            sscreen->b.family != CHIP_CARRIZO &&
-           sscreen->b.family != CHIP_STONEY &&
-           /* Some CE bug is causing green screen corruption w/ MPV video
-            * playback and occasional corruption w/ 3D. */
-           sscreen->b.chip_class != GFX9) {
+           sscreen->b.family != CHIP_STONEY) {
                sctx->ce_ib = ws->cs_add_const_ib(sctx->b.gfx.cs);
                if (!sctx->ce_ib)
                        goto fail;
@@ -260,6 +270,23 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
        /* these must be last */
        si_begin_new_cs(sctx);
 
+       if (sctx->b.chip_class >= GFX9) {
+               sctx->wait_mem_scratch = (struct r600_resource*)
+                       pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4);
+               if (!sctx->wait_mem_scratch)
+                       goto fail;
+
+       /* Initialize the memory with the current wait_mem_number. */
+               struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+               radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+               radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
+                           S_370_WR_CONFIRM(1) |
+                           S_370_ENGINE_SEL(V_370_ME));
+               radeon_emit(cs, sctx->wait_mem_scratch->gpu_address);
+               radeon_emit(cs, sctx->wait_mem_scratch->gpu_address >> 32);
+               radeon_emit(cs, sctx->wait_mem_number);
+       }
+
        /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
         * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
        if (sctx->b.chip_class == CIK) {
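
The 4-byte wait_mem_scratch buffer seeded in the hunk above pairs with wait_mem_number: a fence value is written to the buffer at end of pipe, and the CP then polls the memory until it matches. A minimal sketch of the polling side, assuming the WAIT_REG_MEM_EQUAL / WAIT_REG_MEM_MEM_SPACE macros from the shared r600 headers and a hypothetical si_wait_mem_value helper:

static void si_wait_mem_value(struct radeon_winsys_cs *cs, uint64_t va,
                              uint32_t ref)
{
        /* Poll the dword at 'va' until it equals 'ref'. */
        radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
        radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        radeon_emit(cs, ref);        /* reference value */
        radeon_emit(cs, 0xffffffff); /* mask */
        radeon_emit(cs, 4);          /* poll interval */
}

Seeding the buffer with the current wait_mem_number means such a wait completes immediately until the first real fence is emitted.
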
@@ -281,6 +308,8 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
 
                si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
                                 &sctx->null_const_buf);
+               si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
+                                &sctx->null_const_buf);
                si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
                                 &sctx->null_const_buf);
                si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
@@ -316,6 +345,27 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
 
        sctx->tm = si_create_llvm_target_machine(sscreen);
 
+       /* Create a slab allocator for all bindless descriptors. */
+       if (!pb_slabs_init(&sctx->bindless_descriptor_slabs, 6, 6, 1, sctx,
+                          si_bindless_descriptor_can_reclaim_slab,
+                          si_bindless_descriptor_slab_alloc,
+                          si_bindless_descriptor_slab_free))
+               goto fail;
+
+       util_dynarray_init(&sctx->bindless_descriptors, NULL);
+
+       /* Bindless handles. */
+       sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                                   _mesa_key_pointer_equal);
+       sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
+                                                   _mesa_key_pointer_equal);
+
+       util_dynarray_init(&sctx->resident_tex_handles, NULL);
+       util_dynarray_init(&sctx->resident_img_handles, NULL);
+       util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
+       util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
+       util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);
+
        return &sctx->b.b;
 fail:
        fprintf(stderr, "radeonsi: Failed to create a context.\n");
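
The two hash tables above map API-visible bindless handles back to the driver's handle objects, keyed by pointer (hence _mesa_hash_pointer / _mesa_key_pointer_equal), while the resident_* dynarrays track which handles are currently resident and which of those still need a color or depth decompress before a draw. A minimal lookup sketch; si_lookup_tex_handle and struct si_texture_handle are illustrative names, not part of this change:

static struct si_texture_handle *
si_lookup_tex_handle(struct si_context *sctx, uint64_t handle)
{
        struct hash_entry *entry =
                _mesa_hash_table_search(sctx->tex_handles,
                                        (void *)(uintptr_t)handle);

        return entry ? entry->data : NULL;
}
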
@@ -327,7 +377,12 @@ static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen,
                                                   void *priv, unsigned flags)
 {
        struct si_screen *sscreen = (struct si_screen *)screen;
-       struct pipe_context *ctx = si_create_context(screen, flags);
+       struct pipe_context *ctx;
+
+       if (sscreen->b.debug_flags & DBG_CHECK_VM)
+               flags |= PIPE_CONTEXT_DEBUG;
+
+       ctx = si_create_context(screen, flags);
 
        if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
                return ctx;
@@ -455,6 +510,13 @@ static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
        case PIPE_CAP_DOUBLES:
        case PIPE_CAP_TGSI_TEX_TXF_LZ:
        case PIPE_CAP_TGSI_TES_LAYER_VIEWPORT:
+       case PIPE_CAP_BINDLESS_TEXTURE:
+       case PIPE_CAP_QUERY_TIMESTAMP:
+       case PIPE_CAP_QUERY_TIME_ELAPSED:
+       case PIPE_CAP_NIR_SAMPLERS_AS_DEREF:
+       case PIPE_CAP_QUERY_SO_OVERFLOW:
+               return 1;
+
        case PIPE_CAP_INT64:
        case PIPE_CAP_INT64_DIVMOD:
        case PIPE_CAP_TGSI_CLOCK:
@@ -495,6 +557,8 @@ static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
                return 4;
 
        case PIPE_CAP_GLSL_FEATURE_LEVEL:
+               if (sscreen->b.debug_flags & DBG_NIR)
+                       return 140; /* no geometry and tessellation shaders yet */
                if (si_have_tgsi_compute(sscreen))
                        return 450;
                return 420;
@@ -512,6 +576,9 @@ static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
                        sscreen->b.info.drm_minor < 50);
 
        case PIPE_CAP_SPARSE_BUFFER_PAGE_SIZE:
+               /* TODO: GFX9 hangs. */
+               if (sscreen->b.chip_class >= GFX9)
+                       return 0;
                /* Disable on SI due to VM faults in CP DMA. Enable once these
                 * faults are mitigated in software.
                 */
@@ -586,11 +653,6 @@ static int si_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
        case PIPE_CAP_MAX_RENDER_TARGETS:
                return 8;
 
-       /* Timer queries, present when the clock frequency is non zero. */
-       case PIPE_CAP_QUERY_TIMESTAMP:
-       case PIPE_CAP_QUERY_TIME_ELAPSED:
-               return sscreen->b.info.clock_crystal_freq != 0;
-
        case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
        case PIPE_CAP_MIN_TEXEL_OFFSET:
                return -32;
@@ -694,6 +756,10 @@ static int si_get_shader_param(struct pipe_screen* pscreen,
        case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
                return 32;
        case PIPE_SHADER_CAP_PREFERRED_IR:
+               if (sscreen->b.debug_flags & DBG_NIR &&
+                   (shader == PIPE_SHADER_VERTEX ||
+                    shader == PIPE_SHADER_FRAGMENT))
+                       return PIPE_SHADER_IR_NIR;
                return PIPE_SHADER_IR_TGSI;
        case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
                return 3;
@@ -701,7 +767,6 @@ static int si_get_shader_param(struct pipe_screen* pscreen,
        /* Supported boolean features. */
        case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
        case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
-       case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
        case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
        case PIPE_SHADER_CAP_INTEGERS:
@@ -711,10 +776,18 @@ static int si_get_shader_param(struct pipe_screen* pscreen,
                return 1;
 
        case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
-               /* TODO: Indirection of geometry shader input dimension is not
-                * handled yet
-                */
-               return shader != PIPE_SHADER_GEOMETRY;
+               /* TODO: Indirect indexing of GS inputs is unimplemented. */
+               return shader != PIPE_SHADER_GEOMETRY &&
+                      (sscreen->llvm_has_working_vgpr_indexing ||
+                       /* TCS and TES load inputs directly from LDS or
+                        * offchip memory, so indirect indexing is trivial. */
+                       shader == PIPE_SHADER_TESS_CTRL ||
+                       shader == PIPE_SHADER_TESS_EVAL);
+
+       case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+               return sscreen->llvm_has_working_vgpr_indexing ||
+                      /* TCS stores outputs directly to memory. */
+                      shader == PIPE_SHADER_TESS_CTRL;
 
        /* Unsupported boolean features. */
        case PIPE_SHADER_CAP_SUBROUTINES:
@@ -726,6 +799,36 @@ static int si_get_shader_param(struct pipe_screen* pscreen,
        return 0;
 }
 
+static const struct nir_shader_compiler_options nir_options = {
+       .vertex_id_zero_based = true,
+       .lower_scmp = true,
+       .lower_flrp32 = true,
+       .lower_fsat = true,
+       .lower_fdiv = true,
+       .lower_sub = true,
+       .lower_pack_snorm_2x16 = true,
+       .lower_pack_snorm_4x8 = true,
+       .lower_pack_unorm_2x16 = true,
+       .lower_pack_unorm_4x8 = true,
+       .lower_unpack_snorm_2x16 = true,
+       .lower_unpack_snorm_4x8 = true,
+       .lower_unpack_unorm_2x16 = true,
+       .lower_unpack_unorm_4x8 = true,
+       .lower_extract_byte = true,
+       .lower_extract_word = true,
+       .max_unroll_iterations = 32,
+       .native_integers = true,
+};
+
+static const void *
+si_get_compiler_options(struct pipe_screen *screen,
+                       enum pipe_shader_ir ir,
+                       enum pipe_shader_type shader)
+{
+       assert(ir == PIPE_SHADER_IR_NIR);
+       return &nir_options;
+}
+
 static void si_destroy_screen(struct pipe_screen* pscreen)
 {
        struct si_screen *sscreen = (struct si_screen *)pscreen;
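
The nir_options table above is handed out through the standard pipe_screen::get_compiler_options hook, so a gallium frontend can fetch the per-screen NIR options before building a shader. A minimal sketch of the calling side (the screen pointer is assumed to be a frontend-side variable):

const struct nir_shader_compiler_options *opts =
        (const struct nir_shader_compiler_options *)
        screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR,
                                     PIPE_SHADER_FRAGMENT);
nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_FRAGMENT, opts, NULL);
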
@@ -742,11 +845,16 @@ static void si_destroy_screen(struct pipe_screen* pscreen)
                return;
 
        util_queue_destroy(&sscreen->shader_compiler_queue);
+       util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);
 
        for (i = 0; i < ARRAY_SIZE(sscreen->tm); i++)
                if (sscreen->tm[i])
                        LLVMDisposeTargetMachine(sscreen->tm[i]);
 
+       for (i = 0; i < ARRAY_SIZE(sscreen->tm_low_priority); i++)
+               if (sscreen->tm_low_priority[i])
+                       LLVMDisposeTargetMachine(sscreen->tm_low_priority[i]);
+
        /* Free shader parts. */
        for (i = 0; i < ARRAY_SIZE(parts); i++) {
                while (parts[i]) {
@@ -857,10 +965,11 @@ static void si_test_vmfault(struct si_screen *sscreen)
        exit(0);
 }
 
-struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
+struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
+                                          const struct pipe_screen_config *config)
 {
        struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
-       unsigned num_cpus, num_compiler_threads, i;
+       unsigned num_threads, num_compiler_threads, num_compiler_threads_lowprio, i;
 
        if (!sscreen) {
                return NULL;
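
With pipe_screen_config plumbed through, a follow-up change can read driver-specific driconf options at screen creation. A minimal sketch, assuming the xmlconfig driQueryOptionb helper, a config->options cache, and a hypothetical field and option name (real options also have to be declared in the driver's driconf table):

/* Hypothetical field and option name, for illustration only. */
sscreen->some_driconf_option =
        driQueryOptionb(config->options, "radeonsi_some_option");
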
@@ -871,11 +980,12 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
        sscreen->b.b.destroy = si_destroy_screen;
        sscreen->b.b.get_param = si_get_param;
        sscreen->b.b.get_shader_param = si_get_shader_param;
+       sscreen->b.b.get_compiler_options = si_get_compiler_options;
        sscreen->b.b.resource_create = r600_resource_create_common;
 
        si_init_screen_state_functions(sscreen);
 
-       if (!r600_common_screen_init(&sscreen->b, ws) ||
+       if (!r600_common_screen_init(&sscreen->b, ws, config->flags) ||
            !si_init_gs_info(sscreen) ||
            !si_init_shader_cache(sscreen)) {
                FREE(sscreen);
@@ -885,17 +995,30 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
        /* Only enable as many threads as we have target machines, but at most
         * the number of CPUs - 1 if there is more than one.
         */
-       num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
-       num_cpus = MAX2(1, num_cpus - 1);
-       num_compiler_threads = MIN2(num_cpus, ARRAY_SIZE(sscreen->tm));
+       num_threads = sysconf(_SC_NPROCESSORS_ONLN);
+       num_threads = MAX2(1, num_threads - 1);
+       num_compiler_threads = MIN2(num_threads, ARRAY_SIZE(sscreen->tm));
+       num_compiler_threads_lowprio =
+               MIN2(num_threads, ARRAY_SIZE(sscreen->tm_low_priority));
 
        if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
-                            32, num_compiler_threads, 0)) {
+                            32, num_compiler_threads,
+                            UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
                si_destroy_shader_cache(sscreen);
                FREE(sscreen);
                return NULL;
        }
 
+       if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority,
+                            "si_shader_low",
+                            32, num_compiler_threads_lowprio,
+                            UTIL_QUEUE_INIT_RESIZE_IF_FULL |
+                            UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
+               si_destroy_shader_cache(sscreen);
+               FREE(sscreen);
+               return NULL;
+       }
+
        si_handle_env_var_force_family(sscreen);
 
        if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
@@ -920,14 +1043,19 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
                 sscreen->b.info.pfp_fw_version >= 211 &&
                 sscreen->b.info.me_fw_version >= 173) ||
                (sscreen->b.chip_class == SI &&
-                sscreen->b.info.pfp_fw_version >= 121 &&
-                sscreen->b.info.me_fw_version >= 87);
+                sscreen->b.info.pfp_fw_version >= 79 &&
+                sscreen->b.info.me_fw_version >= 142);
 
        sscreen->has_ds_bpermute = sscreen->b.chip_class >= VI;
        sscreen->has_msaa_sample_loc_bug = (sscreen->b.family >= CHIP_POLARIS10 &&
                                            sscreen->b.family <= CHIP_POLARIS12) ||
                                           sscreen->b.family == CHIP_VEGA10 ||
                                           sscreen->b.family == CHIP_RAVEN;
+       /* While it would be nice not to have this flag, we are constrained
+        * by the reality that LLVM 5.0 doesn't have working VGPR indexing
+        * on GFX9.
+        */
+       sscreen->llvm_has_working_vgpr_indexing = sscreen->b.chip_class <= VI;
 
        sscreen->b.has_cp_dma = true;
        sscreen->b.has_streamout = true;
@@ -950,8 +1078,10 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
                (sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;
 
        sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
-                                           SI_CONTEXT_INV_VMEM_L1 |
-                                           SI_CONTEXT_INV_GLOBAL_L2;
+                                           SI_CONTEXT_INV_VMEM_L1;
+       if (sscreen->b.chip_class <= VI)
+               sscreen->b.barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
+
        sscreen->b.barrier_flags.compute_to_L2 = SI_CONTEXT_CS_PARTIAL_FLUSH;
 
        if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
@@ -959,6 +1089,8 @@ struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws)
 
        for (i = 0; i < num_compiler_threads; i++)
                sscreen->tm[i] = si_create_llvm_target_machine(sscreen);
+       for (i = 0; i < num_compiler_threads_lowprio; i++)
+               sscreen->tm_low_priority[i] = si_create_llvm_target_machine(sscreen);
 
        /* Create the auxiliary context. This must be done last. */
        sscreen->b.aux_context = si_create_context(&sscreen->b.b, 0);