radeonsi: set COMPUTE_DISPATCH_INITIATOR.ORDER_MODE = 1
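
Allow the hardware to launch compute waves out-of-order on CIK and newer,
which is what the Vulkan driver already enables. SI keeps the default
in-order launch, since ORDER_MODE is only set for chip_class >= CIK.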
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 913a2ddbfe61cb45a2f721d59e045222d62c6ce0..fba02faeb5a79f8b03ffb8b6a79e373e63d5d0c4 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
 #include "amd_kernel_code_t.h"
 #include "radeon/r600_cs.h"
 #include "si_pipe.h"
+#include "si_compute.h"
 #include "sid.h"
 
-#define MAX_GLOBAL_BUFFERS 22
-
-struct si_compute {
-       struct si_screen *screen;
-       struct tgsi_token *tokens;
-       struct util_queue_fence ready;
-       struct si_compiler_ctx_state compiler_ctx_state;
-
-       unsigned ir_type;
-       unsigned local_size;
-       unsigned private_size;
-       unsigned input_size;
-       struct si_shader shader;
-
-       struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-       unsigned use_code_object_v2 : 1;
-       unsigned variable_group_size : 1;
-};
-
 struct dispatch_packet {
        uint16_t header;
        uint16_t setup;
@@ -113,18 +95,29 @@ static void si_create_compute_state_async(void *job, int thread_index)
 
        memset(&sel, 0, sizeof(sel));
 
+       sel.screen = program->screen;
        tgsi_scan_shader(program->tokens, &sel.info);
        sel.tokens = program->tokens;
        sel.type = PIPE_SHADER_COMPUTE;
        sel.local_size = program->local_size;
+       si_get_active_slot_masks(&sel.info,
+                                &program->active_const_and_shader_buffers,
+                                &program->active_samplers_and_images);
 
        program->shader.selector = &sel;
        program->shader.is_monolithic = true;
+       program->uses_grid_size = sel.info.uses_grid_size;
+       program->uses_block_size = sel.info.uses_block_size;
+       program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+       program->uses_bindless_images = sel.info.uses_bindless_images;
 
        if (si_shader_create(program->screen, tm, &program->shader, debug)) {
                program->shader.compilation_failed = true;
        } else {
                bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
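+               /* Resource descriptors always occupy SI_NUM_RESOURCE_SGPRS;
+                * the grid size and the block size each take 3 more user
+                * SGPRs, but only if the shader actually reads them. */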
+               unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
+                                     (sel.info.uses_grid_size ? 3 : 0) +
+                                     (sel.info.uses_block_size ? 3 : 0);
 
                shader->config.rsrc1 =
                        S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
@@ -133,10 +126,13 @@ static void si_create_compute_state_async(void *job, int thread_index)
                        S_00B848_FLOAT_MODE(shader->config.float_mode);
 
                shader->config.rsrc2 =
-                       S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
+                       S_00B84C_USER_SGPR(user_sgprs) |
                        S_00B84C_SCRATCH_EN(scratch_enabled) |
-                       S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
-                       S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
+                       S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
+                       S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
+                       S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
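+                       /* TIDIG_COMP_CNT: 0/1/2 = initialize X/XY/XYZ
+                        * thread IDs. */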
+                       S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
+                                               sel.info.uses_thread_id[1] ? 1 : 0) |
                        S_00B84C_LDS_SIZE(shader->config.lds_size);
 
                program->variable_group_size =
@@ -214,7 +210,24 @@ static void *si_create_compute_state(
 static void si_bind_compute_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context*)ctx;
-       sctx->cs_shader_state.program = (struct si_compute*)state;
+       struct si_compute *program = (struct si_compute*)state;
+
+       sctx->cs_shader_state.program = program;
+       if (!program)
+               return;
+
+       /* Wait for the asynchronous compile to finish, because we need
+        * the active slot usage masks it computes. */
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               util_queue_fence_wait(&program->ready);
+
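+       /* Tell the descriptor code which slots this shader actually uses. */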
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
+                                 program->active_const_and_shader_buffers);
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
+                                 program->active_samplers_and_images);
 }
 
 static void si_set_global_binding(
@@ -252,11 +265,6 @@ static void si_initialize_compute(struct si_context *sctx)
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        uint64_t bc_va;
 
-       radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-
        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
@@ -331,7 +339,7 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
        if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
                uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
 
-               si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);
+               si_shader_apply_scratch_relocs(shader, scratch_va);
 
                if (si_shader_binary_upload(sctx->screen, shader))
                        return false;
@@ -468,16 +476,20 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
        /* Disable address clamping */
        uint32_t scratch_dword2 = 0xffffffff;
        uint32_t scratch_dword3 =
-               S_008F0C_ELEMENT_SIZE(max_private_element_size) |
                S_008F0C_INDEX_STRIDE(3) |
                S_008F0C_ADD_TID_ENABLE(1);
 
+       if (sctx->b.chip_class >= GFX9) {
+               assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
+       } else {
+               scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
 
-       if (sctx->screen->b.chip_class < VI) {
-               /* BUF_DATA_FORMAT is ignored, but it cannot be
-                  BUF_DATA_FORMAT_INVALID. */
-               scratch_dword3 |=
-                       S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+               if (sctx->b.chip_class < VI) {
+                       /* BUF_DATA_FORMAT is ignored, but it cannot be
+                        * BUF_DATA_FORMAT_INVALID. */
+                       scratch_dword3 |=
+                               S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+               }
        }
 
        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
@@ -646,36 +658,43 @@ static bool si_upload_compute_input(struct si_context *sctx,
 static void si_setup_tgsi_grid(struct si_context *sctx,
                                 const struct pipe_grid_info *info)
 {
+       struct si_compute *program = sctx->cs_shader_state.program;
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
-                                 4 * SI_SGPR_GRID_SIZE;
+                                4 * SI_NUM_RESOURCE_SGPRS;
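+       /* The block size SGPRs, when present, directly follow the grid
+        * size SGPRs. */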
+       unsigned block_size_reg = grid_size_reg +
+                                 /* 12 bytes = 3 dwords. */
+                                 12 * program->uses_grid_size;
 
        if (info->indirect) {
-               uint64_t base_va = r600_resource(info->indirect)->gpu_address;
-               uint64_t va = base_va + info->indirect_offset;
-               int i;
-
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                (struct r600_resource *)info->indirect,
-                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
-
-               for (i = 0; i < 3; ++i) {
-                       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
-                       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
-                                       COPY_DATA_DST_SEL(COPY_DATA_REG));
-                       radeon_emit(cs, (va +  4 * i));
-                       radeon_emit(cs, (va + 4 * i) >> 32);
-                       radeon_emit(cs, (grid_size_reg >> 2) + i);
-                       radeon_emit(cs, 0);
+               if (program->uses_grid_size) {
+                       uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+                       uint64_t va = base_va + info->indirect_offset;
+                       int i;
+
+                       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                        (struct r600_resource *)info->indirect,
+                                        RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
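+                       /* Copy the grid size from the indirect buffer
+                        * into the grid-size user SGPRs dword by dword. */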
+                       for (i = 0; i < 3; ++i) {
+                               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+                                               COPY_DATA_DST_SEL(COPY_DATA_REG));
+                               radeon_emit(cs, (va + 4 * i));
+                               radeon_emit(cs, (va + 4 * i) >> 32);
+                               radeon_emit(cs, (grid_size_reg >> 2) + i);
+                               radeon_emit(cs, 0);
+                       }
                }
        } else {
-               struct si_compute *program = sctx->cs_shader_state.program;
-
-               radeon_set_sh_reg_seq(cs, grid_size_reg, program->variable_group_size ? 6 : 3);
-               radeon_emit(cs, info->grid[0]);
-               radeon_emit(cs, info->grid[1]);
-               radeon_emit(cs, info->grid[2]);
-               if (program->variable_group_size) {
+               if (program->uses_grid_size) {
+                       radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+                       radeon_emit(cs, info->grid[0]);
+                       radeon_emit(cs, info->grid[1]);
+                       radeon_emit(cs, info->grid[2]);
+               }
+               if (program->variable_group_size && program->uses_block_size) {
+                       radeon_set_sh_reg_seq(cs, block_size_reg, 3);
                        radeon_emit(cs, info->block[0]);
                        radeon_emit(cs, info->block[1]);
                        radeon_emit(cs, info->block[2]);
@@ -699,6 +718,13 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
        radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
        radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
 
+       unsigned dispatch_initiator =
+               S_00B800_COMPUTE_SHADER_EN(1) |
+               S_00B800_FORCE_START_AT_000(1) |
+               /* If the KMD allows it (there is a hw register the KMD
+                * controls for this), allow launching waves out-of-order,
+                * the same as Vulkan does. */
+               S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+
        if (info->indirect) {
                uint64_t base_va = r600_resource(info->indirect)->gpu_address;
 
@@ -715,14 +741,14 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
                radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->indirect_offset);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        } else {
                radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        }
 }
 
@@ -751,12 +777,9 @@ static void si_launch_grid(
                sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH;
 
-       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
-               util_queue_fence_wait(&program->ready);
-
-               if (program->shader.compilation_failed)
-                       return;
-       }
+       if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+           program->shader.compilation_failed)
+               return;
 
        si_decompress_compute_textures(sctx);
 
@@ -767,8 +790,9 @@ static void si_launch_grid(
        if (info->indirect) {
                r600_context_add_resource_size(ctx, info->indirect);
 
-               /* The hw doesn't read the indirect buffer via TC L2. */
-               if (r600_resource(info->indirect)->TC_L2_dirty) {
+               /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+               if (sctx->b.chip_class <= VI &&
+                   r600_resource(info->indirect)->TC_L2_dirty) {
                        sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
                        r600_resource(info->indirect)->TC_L2_dirty = false;
                }
@@ -841,7 +865,8 @@ static void si_delete_compute_state(struct pipe_context *ctx, void* state){
        }
 
        if (program->ir_type == PIPE_SHADER_IR_TGSI) {
-               util_queue_fence_wait(&program->ready);
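+               /* If the compile job hasn't started yet, drop it instead
+                * of waiting for it. */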
+               util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+                                   &program->ready);
                util_queue_fence_destroy(&program->ready);
        }