amd,radeonsi: rename radeon_winsys_cs -> radeon_cmdbuf
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 4c980668d350531631d1a24819b58f85037a2e98..cb320323db3dd7e16befe4c6179021697db9fac2 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
@@ -1,5 +1,6 @@
 /*
  * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  */
 
 #include "tgsi/tgsi_parse.h"
+#include "util/u_async_debug.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
 
 #include "amd_kernel_code_t.h"
-#include "radeon/r600_cs.h"
-#include "si_pipe.h"
+#include "si_build_pm4.h"
 #include "si_compute.h"
-#include "sid.h"
+
+#define COMPUTE_DBG(rscreen, fmt, args...) \
+       do { \
+               if ((rscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
+       } while (0)
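
The do/while(0) wrapper, with no trailing semicolon, is what lets the macro be
used like an ordinary statement. A minimal standalone sketch of why that
matters; the names here are illustrative stand-ins, not mesa's:

    #include <stdio.h>

    struct fake_screen { unsigned debug_flags; };   /* stand-in for si_screen */
    #define DBG_COMPUTE (1u << 0)                   /* stand-in for DBG(COMPUTE) */

    /* Same shape as COMPUTE_DBG above; "args..." / "##args" are the GNU C
     * variadic-macro extension that mesa relies on. */
    #define FAKE_DBG(s, fmt, args...) \
            do { \
                    if ((s)->debug_flags & DBG_COMPUTE) \
                            fprintf(stderr, fmt, ##args); \
            } while (0)

    int main(void)
    {
            struct fake_screen s = { .debug_flags = DBG_COMPUTE };

            /* Because the macro expands to exactly one statement, it is safe
             * in an un-braced if/else; a semicolon inside the macro would
             * turn the else branch into a syntax error. */
            if (s.debug_flags)
                    FAKE_DBG(&s, "compute debug enabled\n");
            else
                    fprintf(stderr, "compute debug disabled\n");
            return 0;
    }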
 
 struct dispatch_packet {
        uint16_t header;
@@ -81,23 +86,30 @@ static void si_create_compute_state_async(void *job, int thread_index)
        struct si_compute *program = (struct si_compute *)job;
        struct si_shader *shader = &program->shader;
        struct si_shader_selector sel;
-       LLVMTargetMachineRef tm;
+       struct si_compiler *compiler;
        struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
 
-       if (thread_index >= 0) {
-               assert(thread_index < ARRAY_SIZE(program->screen->tm));
-               tm = program->screen->tm[thread_index];
-               if (!debug->async)
-                       debug = NULL;
-       } else {
-               tm = program->compiler_ctx_state.tm;
-       }
+       assert(!debug->debug_message || debug->async);
+       assert(thread_index >= 0);
+       assert(thread_index < ARRAY_SIZE(program->screen->compiler));
+       compiler = &program->screen->compiler[thread_index];
 
        memset(&sel, 0, sizeof(sel));
 
        sel.screen = program->screen;
-       tgsi_scan_shader(program->tokens, &sel.info);
-       sel.tokens = program->tokens;
+
+       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+               tgsi_scan_shader(program->ir.tgsi, &sel.info);
+               sel.tokens = program->ir.tgsi;
+       } else {
+               assert(program->ir_type == PIPE_SHADER_IR_NIR);
+               sel.nir = program->ir.nir;
+
+               si_nir_scan_shader(sel.nir, &sel.info);
+               si_lower_nir(&sel);
+       }
+
        sel.type = PIPE_SHADER_COMPUTE;
        sel.local_size = program->local_size;
        si_get_active_slot_masks(&sel.info,
@@ -108,8 +120,10 @@ static void si_create_compute_state_async(void *job, int thread_index)
        program->shader.is_monolithic = true;
        program->uses_grid_size = sel.info.uses_grid_size;
        program->uses_block_size = sel.info.uses_block_size;
+       program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+       program->uses_bindless_images = sel.info.uses_bindless_images;
 
-       if (si_shader_create(program->screen, tm, &program->shader, debug)) {
+       if (si_shader_create(program->screen, compiler, &program->shader, debug)) {
                program->shader.compilation_failed = true;
        } else {
                bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
@@ -137,7 +151,9 @@ static void si_create_compute_state_async(void *job, int thread_index)
                        sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
        }
 
-       FREE(program->tokens);
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               FREE(program->ir.tgsi);
+
        program->shader.selector = NULL;
 }
 
@@ -149,35 +165,51 @@ static void *si_create_compute_state(
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
 
+       pipe_reference_init(&program->reference, 1);
        program->screen = (struct si_screen *)ctx->screen;
        program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
-       program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
-                                       cso->ir_type == PIPE_SHADER_IR_NATIVE;
-
-       if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
-               program->tokens = tgsi_dup_tokens(cso->prog);
-               if (!program->tokens) {
-                       FREE(program);
-                       return NULL;
+       program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;
+
+       if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
+               if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+                       program->ir.tgsi = tgsi_dup_tokens(cso->prog);
+                       if (!program->ir.tgsi) {
+                               FREE(program);
+                               return NULL;
+                       }
+               } else {
+                       assert(cso->ir_type == PIPE_SHADER_IR_NIR);
+                       program->ir.nir = (struct nir_shader *) cso->prog;
                }
 
-               program->compiler_ctx_state.tm = sctx->tm;
-               program->compiler_ctx_state.debug = sctx->b.debug;
+               program->compiler_ctx_state.debug = sctx->debug;
                program->compiler_ctx_state.is_debug_context = sctx->is_debug;
-               p_atomic_inc(&sscreen->b.num_shaders_created);
+               p_atomic_inc(&sscreen->num_shaders_created);
                util_queue_fence_init(&program->ready);
 
-               if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
-                   sctx->is_debug ||
-                   r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
-                       si_create_compute_state_async(program, -1);
-               else
-                       util_queue_add_job(&sscreen->shader_compiler_queue,
-                                          program, &program->ready,
-                                          si_create_compute_state_async, NULL);
+               struct util_async_debug_callback async_debug;
+               bool wait =
+                       (sctx->debug.debug_message && !sctx->debug.async) ||
+                       sctx->is_debug ||
+                       si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);
+
+               if (wait) {
+                       u_async_debug_init(&async_debug);
+                       program->compiler_ctx_state.debug = async_debug.base;
+               }
+
+               util_queue_add_job(&sscreen->shader_compiler_queue,
+                                  program, &program->ready,
+                                  si_create_compute_state_async, NULL);
+
+               if (wait) {
+                       util_queue_fence_wait(&program->ready);
+                       u_async_debug_drain(&async_debug, &sctx->debug);
+                       u_async_debug_cleanup(&async_debug);
+               }
        } else {
                const struct pipe_llvm_program_header *header;
                const char *code;
@@ -193,7 +225,7 @@ static void *si_create_compute_state(
                        si_shader_binary_read_config(&program->shader.binary,
                                     &program->shader.config, 0);
                }
-               si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
+               si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
                               PIPE_SHADER_COMPUTE, stderr, true);
                if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
                        fprintf(stderr, "LLVM failed to upload shader\n");
@@ -215,7 +247,7 @@ static void si_bind_compute_state(struct pipe_context *ctx, void *state)
                return;
 
        /* Wait because we need active slot usage masks. */
-       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE)
                util_queue_fence_wait(&program->ready);
 
        si_set_active_descriptors(sctx,
@@ -260,20 +292,15 @@ static void si_set_global_binding(
 
 static void si_initialize_compute(struct si_context *sctx)
 {
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        uint64_t bc_va;
 
-       radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-
        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
 
-       if (sctx->b.chip_class >= CIK) {
+       if (sctx->chip_class >= CIK) {
                /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
                radeon_set_sh_reg_seq(cs,
                                     R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
@@ -288,7 +315,7 @@ static void si_initialize_compute(struct si_context *sctx)
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
-       if (sctx->b.chip_class <= SI) {
+       if (sctx->chip_class <= SI) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */
 
@@ -299,14 +326,12 @@ static void si_initialize_compute(struct si_context *sctx)
        /* Set the pointer to border colors. */
        bc_va = sctx->border_color_buffer->gpu_address;
 
-       if (sctx->b.chip_class >= CIK) {
+       if (sctx->chip_class >= CIK) {
                radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
                radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
-               radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
+               radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
        } else {
-               if (sctx->screen->b.info.drm_major == 3 ||
-                   (sctx->screen->b.info.drm_major == 2 &&
-                    sctx->screen->b.info.drm_minor >= 48)) {
+               if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
                        radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
                                              bc_va >> 8);
                }
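
The removed DRM-version test has moved behind a winsys capability flag.
Presumably the winsys derives si_TA_CS_BC_BASE_ADDR_allowed from the very
check that used to live here; a hedged reconstruction (the real logic lives
in the winsys setup code, not in this diff):

    /* Hypothetical sketch: the radeon kernel driver (DRM 2.x) gained the
     * register write in 2.48, and amdgpu (DRM 3.x) always allows it. */
    info->si_TA_CS_BC_BASE_ADDR_allowed =
            info->drm_major == 3 ||
            (info->drm_major == 2 && info->drm_minor >= 48);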
@@ -329,11 +354,11 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
        if (scratch_bo_size < scratch_needed) {
                r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
 
-               sctx->compute_scratch_buffer = (struct r600_resource*)
-                       r600_aligned_buffer_create(&sctx->screen->b.b,
-                                                  R600_RESOURCE_FLAG_UNMAPPABLE,
-                                                  PIPE_USAGE_DEFAULT,
-                                                  scratch_needed, 256);
+               sctx->compute_scratch_buffer =
+                       si_aligned_buffer_create(&sctx->screen->b,
+                                                SI_RESOURCE_FLAG_UNMAPPABLE,
+                                                PIPE_USAGE_DEFAULT,
+                                                scratch_needed, 256);
 
                if (!sctx->compute_scratch_buffer)
                        return false;
@@ -360,7 +385,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                                     const amd_kernel_code_t *code_object,
                                     unsigned offset)
 {
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        struct si_shader_config inline_config = {0};
        struct si_shader_config *config;
        uint64_t shader_va;
@@ -369,7 +394,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
            sctx->cs_shader_state.offset == offset)
                return true;
 
-       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
                config = &shader->config;
        } else {
                unsigned lds_blocks;
@@ -387,7 +412,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                * allocated in the shader and 4 bytes allocated by the state
                * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
                */
-               if (sctx->b.chip_class <= SI) {
+               if (sctx->chip_class <= SI) {
                        lds_blocks += align(program->local_size, 256) >> 8;
                } else {
                        lds_blocks += align(program->local_size, 512) >> 9;
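
To make the granularity comment above concrete, a small standalone worked
example using the same align-then-shift arithmetic (not driver code):

    #include <stdio.h>

    /* align() as mesa uses it: round v up to a power-of-two multiple of a. */
    static unsigned align_pot(unsigned v, unsigned a)
    {
            return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            unsigned local_size = 260; /* hypothetical CS local memory request */

            /* SI counts LDS in 256-byte blocks, CI+ in 512-byte blocks, so a
             * 260-byte request reserves 512 bytes either way, but the block
             * count written to the register field differs: 2 vs. 1. */
            printf("SI:  %u blocks\n", align_pot(local_size, 256) >> 8);
            printf("CI+: %u blocks\n", align_pot(local_size, 512) >> 9);
            return 0;
    }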
@@ -410,7 +435,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                            config->scratch_bytes_per_wave *
                            sctx->scratch_waves);
 
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+               radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
                              shader->scratch_bo, RADEON_USAGE_READWRITE,
                              RADEON_PRIO_SCRATCH_BUFFER);
        }
@@ -422,7 +447,7 @@ static bool si_switch_compute_shader(struct si_context *sctx,
         * command. However, that would add more complexity and we're likely
         * to get a shader state change in that case anyway.
         */
-       if (sctx->b.chip_class >= CIK) {
+       if (sctx->chip_class >= CIK) {
                cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
                                         0, program->shader.bo->b.b.width0);
        }
@@ -434,12 +459,12 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                shader_va += sizeof(amd_kernel_code_t);
        }
 
-       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+       radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
                                  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
 
        radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
        radeon_emit(cs, shader_va >> 8);
-       radeon_emit(cs, shader_va >> 40);
+       radeon_emit(cs, S_00B834_DATA(shader_va >> 40));
 
        radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
        radeon_emit(cs, config->rsrc1);
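
Wrapping the high address bits in S_00B834_DATA() masks the value to the
register field instead of writing whatever happens to sit in the upper bits
of the virtual address. The generated S_* setters in sid.h follow this shape
(illustrative; the generated header is authoritative):

    /* 8-bit DATA field at bit 0 of COMPUTE_PGM_HI */
    #define S_00B834_DATA(x)        (((unsigned)(x) & 0xFF) << 0)

so S_00B834_DATA(shader_va >> 40) keeps exactly address bits 47:40.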
@@ -464,7 +489,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                                          const amd_kernel_code_t *code_object,
                                          unsigned user_sgpr)
 {
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
 
        unsigned max_private_element_size = AMD_HSA_BITS_GET(
@@ -482,12 +507,12 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
                S_008F0C_INDEX_STRIDE(3) |
                S_008F0C_ADD_TID_ENABLE(1);
 
-       if (sctx->b.chip_class >= GFX9) {
+       if (sctx->chip_class >= GFX9) {
                assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
        } else {
                scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
 
-               if (sctx->b.chip_class < VI) {
+               if (sctx->chip_class < VI) {
                        /* BUF_DATA_FORMAT is ignored, but it cannot be
                         * BUF_DATA_FORMAT_INVALID. */
                        scratch_dword3 |=
@@ -509,7 +534,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                                      uint64_t kernel_args_va)
 {
        struct si_compute *program = sctx->cs_shader_state.program;
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
 
        static const enum amd_code_property_mask_t workgroup_count_masks [] = {
                AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
@@ -537,20 +562,20 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                /* Upload dispatch ptr */
                memset(&dispatch, 0, sizeof(dispatch));
 
-               dispatch.workgroup_size_x = info->block[0];
-               dispatch.workgroup_size_y = info->block[1];
-               dispatch.workgroup_size_z = info->block[2];
+               dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
+               dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
+               dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);
 
-               dispatch.grid_size_x = info->grid[0] * info->block[0];
-               dispatch.grid_size_y = info->grid[1] * info->block[1];
-               dispatch.grid_size_z = info->grid[2] * info->block[2];
+               dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
+               dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
+               dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);
 
-               dispatch.private_segment_size = program->private_size;
-               dispatch.group_segment_size = program->local_size;
+               dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
+               dispatch.group_segment_size = util_cpu_to_le32(program->local_size);
 
-               dispatch.kernarg_address = kernel_args_va;
+               dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);
 
-               u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+               u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
                               256, &dispatch, &dispatch_offset,
                               (struct pipe_resource**)&dispatch_buf);
 
@@ -558,7 +583,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
                        fprintf(stderr, "Error: Failed to allocate dispatch "
                                        "packet.");
                }
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+               radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 
                dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
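
The new util_cpu_to_le* calls exist because the dispatch packet (and the
kernel arguments below) are read by the GPU in little-endian layout, so
big-endian hosts must byte-swap on write. A hedged sketch of what such a
helper boils down to (mesa's real implementation lives in util; this
hand-rolled version is only for illustration):

    #include <stdint.h>

    static inline uint32_t cpu_to_le32_sketch(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);    /* swap on big-endian hosts */
    #else
            return v;                       /* no-op on little-endian hosts */
    #endif
    }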
@@ -598,7 +623,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
 {
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        struct si_compute *program = sctx->cs_shader_state.program;
        struct r600_resource *input_buffer = NULL;
        unsigned kernel_args_size;
@@ -612,8 +637,8 @@ static bool si_upload_compute_input(struct si_context *sctx,
        /* The extra num_work_size_bytes are for work group / work item size information */
        kernel_args_size = program->input_size + num_work_size_bytes;
 
-       u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
-                      sctx->screen->b.info.tcc_cache_line_size,
+       u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
+                      sctx->screen->info.tcc_cache_line_size,
                       &kernel_args_offset,
                       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
 
@@ -625,9 +650,9 @@ static bool si_upload_compute_input(struct si_context *sctx,
 
        if (!code_object) {
                for (i = 0; i < 3; i++) {
-                       kernel_args[i] = info->grid[i];
-                       kernel_args[i + 3] = info->grid[i] * info->block[i];
-                       kernel_args[i + 6] = info->block[i];
+                       kernel_args[i] = util_cpu_to_le32(info->grid[i]);
+                       kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
+                       kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
                }
        }
 
@@ -641,7 +666,7 @@ static bool si_upload_compute_input(struct si_context *sctx,
        }
 
 
-       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+       radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 
        if (code_object) {
@@ -662,7 +687,7 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
                                 const struct pipe_grid_info *info)
 {
        struct si_compute *program = sctx->cs_shader_state.program;
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
                                 4 * SI_NUM_RESOURCE_SGPRS;
        unsigned block_size_reg = grid_size_reg +
@@ -675,8 +700,8 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
                        uint64_t va = base_va + info->indirect_offset;
                        int i;
 
-                       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                        (struct r600_resource *)info->indirect,
+                       radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+                                        r600_resource(info->indirect),
                                         RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
 
                        for (i = 0; i < 3; ++i) {
@@ -708,24 +733,46 @@ static void si_setup_tgsi_grid(struct si_context *sctx,
 static void si_emit_dispatch_packets(struct si_context *sctx,
                                      const struct pipe_grid_info *info)
 {
-       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
-       bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+       struct si_screen *sscreen = sctx->screen;
+       struct radeon_cmdbuf *cs = sctx->gfx_cs;
+       bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
        unsigned waves_per_threadgroup =
                DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
+       unsigned compute_resource_limits =
+               S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
+
+       if (sctx->chip_class >= CIK) {
+               unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
+                                        sscreen->info.max_se;
+
+               /* Force even distribution on all SIMDs in CU if the workgroup
+                * fits in a single wave (at most 64 threads). This has shown
+                * some good improvements if # of CUs per SE is not a multiple
+                * of 4.
+                */
+               if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
+                       compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
+       }
 
        radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
-                         S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));
+                         compute_resource_limits);
 
        radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
        radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
        radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
        radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
 
+       unsigned dispatch_initiator =
+               S_00B800_COMPUTE_SHADER_EN(1) |
+               S_00B800_FORCE_START_AT_000(1) |
+               /* If the KMD allows it (there is a KMD hw register for it),
+                * allow launching waves out-of-order, as the Vulkan driver does. */
+               S_00B800_ORDER_MODE(sctx->chip_class >= CIK);
+
        if (info->indirect) {
                uint64_t base_va = r600_resource(info->indirect)->gpu_address;
 
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                (struct r600_resource *)info->indirect,
+               radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+                                r600_resource(info->indirect),
                                 RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
 
                radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
@@ -737,14 +784,14 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
                radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->indirect_offset);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        } else {
                radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        }
 }
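
The resource-limits programming above keys off whole wave counts per
threadgroup. A standalone arithmetic check of the two conditions
(SIMD_DEST_CNTL for a multiple of four waves, the CIK-only FORCE_SIMD_DIST
for single-wave groups), using the same DIV_ROUND_UP rounding:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Hypothetical 8x8x4 workgroup: 256 threads = 4 wave64s. */
            unsigned block[3] = { 8, 8, 4 };
            unsigned waves = DIV_ROUND_UP(block[0] * block[1] * block[2], 64);

            printf("waves_per_threadgroup = %u\n", waves);         /* 4 */
            printf("SIMD_DEST_CNTL       = %d\n", waves % 4 == 0); /* 1 */
            printf("FORCE_SIMD_DIST case = %d\n", waves == 1);     /* 0 */
            return 0;
    }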
 
@@ -764,41 +811,47 @@ static void si_launch_grid(
         * Only SI and certain CIK chips are affected.
         */
        bool cs_regalloc_hang =
-               (sctx->b.chip_class == SI ||
-                sctx->b.family == CHIP_BONAIRE ||
-                sctx->b.family == CHIP_KABINI) &&
+               (sctx->chip_class == SI ||
+                sctx->family == CHIP_BONAIRE ||
+                sctx->family == CHIP_KABINI) &&
                info->block[0] * info->block[1] * info->block[2] > 256;
 
        if (cs_regalloc_hang)
-               sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+               sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH;
 
-       if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
            program->shader.compilation_failed)
                return;
 
-       si_decompress_compute_textures(sctx);
+       if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
+               si_update_fb_dirtiness_after_rendering(sctx);
+               sctx->last_num_draw_calls = sctx->num_draw_calls;
+       }
+
+       si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
 
        /* Add buffer sizes for memory checking in need_cs_space. */
-       r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
+       si_context_add_resource_size(sctx, &program->shader.bo->b.b);
        /* TODO: add the scratch buffer */
 
        if (info->indirect) {
-               r600_context_add_resource_size(ctx, info->indirect);
+               si_context_add_resource_size(sctx, info->indirect);
 
-               /* The hw doesn't read the indirect buffer via TC L2. */
-               if (r600_resource(info->indirect)->TC_L2_dirty) {
-                       sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+               /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+               if (sctx->chip_class <= VI &&
+                   r600_resource(info->indirect)->TC_L2_dirty) {
+                       sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
                        r600_resource(info->indirect)->TC_L2_dirty = false;
                }
        }
 
-       si_need_cs_space(sctx);
+       si_need_gfx_cs_space(sctx);
 
        if (!sctx->cs_shader_state.initialized)
                si_initialize_compute(sctx);
 
-       if (sctx->b.flags)
+       if (sctx->flags)
                si_emit_cache_flush(sctx);
 
        if (!si_switch_compute_shader(sctx, program, &program->shader,
@@ -806,12 +859,11 @@ static void si_launch_grid(
                return;
 
        si_upload_compute_shader_descriptors(sctx);
-       si_emit_compute_shader_userdata(sctx);
+       si_emit_compute_shader_pointers(sctx);
 
-       if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
-               sctx->atoms.s.render_cond->emit(&sctx->b,
-                                               sctx->atoms.s.render_cond);
-               si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+       if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
+               sctx->atoms.s.render_cond.emit(sctx);
+               si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
        }
 
        if ((program->input_size ||
             program->ir_type == PIPE_SHADER_IR_NATIVE) &&
            unlikely(!si_upload_compute_input(sctx, code_object, info)))
                return;
 
        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
                struct r600_resource *buffer =
-                               (struct r600_resource*)program->global_buffers[i];
+                       r600_resource(program->global_buffers[i]);
                if (!buffer) {
                        continue;
                }
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+               radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
                                          RADEON_USAGE_READWRITE,
                                          RADEON_PRIO_COMPUTE_GLOBAL);
        }
 
-       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE)
                si_setup_tgsi_grid(sctx, info);
 
-       si_ce_pre_draw_synchronization(sctx);
-
        si_emit_dispatch_packets(sctx, info);
 
-       si_ce_post_draw_synchronization(sctx);
+       if (unlikely(sctx->current_saved_cs)) {
+               si_trace_emit(sctx);
+               si_log_compute_state(sctx, sctx->log);
+       }
 
        sctx->compute_is_busy = true;
-       sctx->b.num_compute_calls++;
+       sctx->num_compute_calls++;
        if (sctx->cs_shader_state.uses_scratch)
-               sctx->b.num_spill_compute_calls++;
+               sctx->num_spill_compute_calls++;
 
        if (cs_regalloc_hang)
-               sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+               sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
 }
 
+void si_destroy_compute(struct si_compute *program)
+{
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
+               util_queue_drop_job(&program->screen->shader_compiler_queue,
+                                   &program->ready);
+               util_queue_fence_destroy(&program->ready);
+       }
+
+       si_shader_destroy(&program->shader);
+       FREE(program);
+}
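
The pipe_reference_init() in si_create_compute_state and this destructor pair
with a reference-drop helper that is not part of this file. Presumably it
follows the standard gallium pipe_reference pattern; a hypothetical sketch
(the real inline would live in si_compute.h):

    static inline void
    si_compute_reference(struct si_compute **dst, struct si_compute *src)
    {
            /* pipe_reference() returns true when the old object's count
             * dropped to zero and it must be destroyed. */
            if (pipe_reference(&(*dst)->reference, &src->reference))
                    si_destroy_compute(*dst);
            *dst = src;
    }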
 
 static void si_delete_compute_state(struct pipe_context *ctx, void* state){
        struct si_compute *program = (struct si_compute *)state;
        struct si_context *sctx = (struct si_context*)ctx;
 
-       if (!state) {
+       if (!state)
                return;
-       }
-
-       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
-               util_queue_fence_wait(&program->ready);
-               util_queue_fence_destroy(&program->ready);
-       }
 
        if (program == sctx->cs_shader_state.program)
                sctx->cs_shader_state.program = NULL;
@@ -870,8 +928,7 @@ static void si_delete_compute_state(struct pipe_context *ctx, void* state){
        if (program == sctx->cs_shader_state.emitted_program)
                sctx->cs_shader_state.emitted_program = NULL;
 
-       si_shader_destroy(&program->shader);
-       FREE(program);
+       si_compute_reference(&program, NULL);
 }
 
 static void si_set_compute_resources(struct pipe_context * ctx_,
@@ -880,11 +937,11 @@ static void si_set_compute_resources(struct pipe_context * ctx_,
 
 void si_init_compute_functions(struct si_context *sctx)
 {
-       sctx->b.b.create_compute_state = si_create_compute_state;
-       sctx->b.b.delete_compute_state = si_delete_compute_state;
-       sctx->b.b.bind_compute_state = si_bind_compute_state;
+       sctx->b.create_compute_state = si_create_compute_state;
+       sctx->b.delete_compute_state = si_delete_compute_state;
+       sctx->b.bind_compute_state = si_bind_compute_state;
 /*      ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
-       sctx->b.b.set_compute_resources = si_set_compute_resources;
-       sctx->b.b.set_global_binding = si_set_global_binding;
-       sctx->b.b.launch_grid = si_launch_grid;
+       sctx->b.set_compute_resources = si_set_compute_resources;
+       sctx->b.set_global_binding = si_set_global_binding;
+       sctx->b.launch_grid = si_launch_grid;
 }