gallium/radeon: add driver queries for compute/dma call stats and spills
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 3ad9182d5fecbcf70aaa8d988abd2e3617d99e01..2f7e1721c89b98f764326e64a4335a61fc58bf0e 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
  *
  */
 
+#include "tgsi/tgsi_parse.h"
 #include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
+#include "radeon/r600_pipe_common.h"
+#include "radeon/radeon_elf_util.h"
+#include "radeon/radeon_llvm_util.h"
 
 #include "radeon/r600_cs.h"
 #include "si_pipe.h"
 #include "si_shader.h"
 #include "sid.h"
 
-#include "radeon/radeon_llvm_util.h"
-
 #define MAX_GLOBAL_BUFFERS 20
-#if HAVE_LLVM < 0x0305
-#define NUM_USER_SGPRS 2
-#else
-#define NUM_USER_SGPRS 4
-#endif
 
 struct si_compute {
-       struct si_context *ctx;
-
+       unsigned ir_type;
        unsigned local_size;
        unsigned private_size;
        unsigned input_size;
-       unsigned num_kernels;
-       struct si_shader *kernels;
-       unsigned num_user_sgprs;
+       struct si_shader shader;
 
-       struct r600_resource *input_buffer;
        struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-
-       LLVMContextRef llvm_ctx;
 };
 
 static void *si_create_compute_state(
@@ -59,34 +51,72 @@ static void *si_create_compute_state(
        const struct pipe_compute_state *cso)
 {
        struct si_context *sctx = (struct si_context *)ctx;
+       struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
-       const struct pipe_llvm_program_header *header;
-       const unsigned char *code;
-       unsigned i;
+       struct si_shader *shader = &program->shader;
 
-       program->llvm_ctx = LLVMContextCreate();
 
-       header = cso->prog;
-       code = cso->prog + sizeof(struct pipe_llvm_program_header);
-
-       program->ctx = sctx;
+       program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
 
-       program->num_kernels = radeon_llvm_get_num_kernels(program->llvm_ctx, code,
-                                                       header->num_bytes);
-       program->kernels = CALLOC(sizeof(struct si_shader),
-                                                       program->num_kernels);
-       for (i = 0; i < program->num_kernels; i++) {
-               LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i,
-                                                       code, header->num_bytes);
-               si_compile_llvm(sctx->screen, &program->kernels[i], mod);
-               LLVMDisposeModule(mod);
-       }
 
-       program->input_buffer = si_resource_create_custom(sctx->b.b.screen,
-               PIPE_USAGE_IMMUTABLE, program->input_size);
+       if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+               struct si_shader_selector sel;
+               bool scratch_enabled;
+
+               memset(&sel, 0, sizeof(sel));
+
+               sel.tokens = tgsi_dup_tokens(cso->prog);
+               if (!sel.tokens) {
+                       FREE(program);
+                       return NULL;
+               }
+
+               tgsi_scan_shader(cso->prog, &sel.info);
+               sel.type = PIPE_SHADER_COMPUTE;
+               sel.local_size = cso->req_local_mem;
+
+               p_atomic_inc(&sscreen->b.num_shaders_created);
+
+               program->shader.selector = &sel;
+
+               if (si_shader_create(sscreen, sctx->tm, &program->shader,
+                                    &sctx->b.debug)) {
+                       FREE(sel.tokens);
+                       FREE(program);
+                       return NULL;
+               }
+
+               scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
+
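+               /* COMPUTE_PGM_RSRC1/2: the hardware allocates VGPRs in granules
+                * of 4 and SGPRs in granules of 8, which is why the register
+                * fields are encoded as (count - 1) / 4 and (count - 1) / 8.
+                */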
+               shader->config.rsrc1 =
+                          S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
+                          S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+                          S_00B848_DX10_CLAMP(1) |
+                          S_00B848_FLOAT_MODE(shader->config.float_mode);
+
+               shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
+                          S_00B84C_SCRATCH_EN(scratch_enabled) |
+                          S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
+                          S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
+                          S_00B84C_LDS_SIZE(shader->config.lds_size);
+
+               FREE(sel.tokens);
+       } else {
+               const struct pipe_llvm_program_header *header;
+               const char *code;
+               header = cso->prog;
+               code = cso->prog + sizeof(struct pipe_llvm_program_header);
+
+               radeon_elf_read(code, header->num_bytes, &program->shader.binary);
+               si_shader_binary_read_config(&program->shader.binary,
+                            &program->shader.config, 0);
+               si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
+                              PIPE_SHADER_COMPUTE, stderr);
+               si_shader_binary_upload(sctx->screen, &program->shader);
+       }
 
        return program;
 }
@@ -125,149 +155,319 @@ static void si_set_global_binding(
        }
 }
 
-/**
- * This function computes the value for R_00B860_COMPUTE_TMPRING_SIZE.WAVES
- * /p block_layout is the number of threads in each work group.
- * /p grid layout is the number of work groups.
- */
-static unsigned compute_num_waves_for_scratch(
-               const struct radeon_info *info,
-               const uint *block_layout,
-               const uint *grid_layout)
+static void si_initialize_compute(struct si_context *sctx)
 {
-       unsigned num_sh = MAX2(info->max_sh_per_se, 1);
-       unsigned num_se = MAX2(info->max_se, 1);
-       unsigned num_blocks = 1;
-       unsigned threads_per_block = 1;
-       unsigned waves_per_block;
-       unsigned waves_per_sh;
-       unsigned waves;
-       unsigned scratch_waves;
-       unsigned i;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
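+       /* COMPUTE_START_X/Y/Z are the workgroup-ID offsets of the dispatch;
+        * gallium grids always start at (0, 0, 0).
+        */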
+       radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
+       radeon_emit(cs, 0);
+       radeon_emit(cs, 0);
+       radeon_emit(cs, 0);
+
+       radeon_set_sh_reg_seq(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, 3);
+       radeon_emit(cs, 0);
+       /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
+       radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
+       radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
+
+       if (sctx->b.chip_class >= CIK) {
+               /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
+               radeon_set_sh_reg_seq(cs,
+                                    R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
+               radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
+                               S_00B864_SH1_CU_EN(0xffff));
+               radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
+                               S_00B868_SH1_CU_EN(0xffff));
+       }
 
-       for (i = 0; i < 3; i++) {
-               threads_per_block *= block_layout[i];
-               num_blocks *= grid_layout[i];
+       /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
+        * and is now per pipe, so it should be handled in the
+        * kernel if we want to use something other than the default value,
+        * which is now 0x22f.
+        */
+       if (sctx->b.chip_class <= SI) {
+               /* XXX: This should be:
+                * (number of compute units) * 4 * (waves per simd) - 1 */
+
+               radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
+                                 0x190 /* Default value */);
        }
 
-       waves_per_block = align(threads_per_block, 64) / 64;
-       waves = waves_per_block * num_blocks;
-       waves_per_sh = align(waves, num_sh * num_se) / (num_sh * num_se);
-       scratch_waves = waves_per_sh * num_sh * num_se;
+       sctx->cs_shader_state.emitted_program = NULL;
+       sctx->cs_shader_state.initialized = true;
+}
 
-       if (waves_per_block > waves_per_sh) {
-               scratch_waves = waves_per_block * num_sh * num_se;
+static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
+                                            struct si_shader *shader,
+                                            struct si_shader_config *config)
+{
+       uint64_t scratch_bo_size, scratch_needed;
+       scratch_bo_size = 0;
+       scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
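+       /* Grow the shared compute scratch buffer if this shader needs more
+        * scratch than is currently allocated for the maximum number of
+        * in-flight waves.
+        */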
+       if (sctx->compute_scratch_buffer)
+               scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
+
+       if (scratch_bo_size < scratch_needed) {
+               pipe_resource_reference(
+                       (struct pipe_resource**)&sctx->compute_scratch_buffer,
+                       NULL);
+
+               sctx->compute_scratch_buffer =
+                               si_resource_create_custom(&sctx->screen->b.b,
+                                PIPE_USAGE_DEFAULT, scratch_needed);
+
+               if (!sctx->compute_scratch_buffer)
+                       return false;
        }
 
-       return scratch_waves;
+       if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
+               uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
+
+               si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);
+
+               if (si_shader_binary_upload(sctx->screen, shader))
+                       return false;
+
+               r600_resource_reference(&shader->scratch_bo,
+                                       sctx->compute_scratch_buffer);
+       }
+
+       return true;
 }
 
-static void si_launch_grid(
-               struct pipe_context *ctx,
-               const uint *block_layout, const uint *grid_layout,
-               uint32_t pc, const void *input)
+static bool si_switch_compute_shader(struct si_context *sctx,
+                                     struct si_compute *program,
+                                     struct si_shader *shader, unsigned offset)
 {
-       struct si_context *sctx = (struct si_context*)ctx;
-       struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct si_shader_config inline_config = {0};
+       struct si_shader_config *config;
+       uint64_t shader_va;
+
+       if (sctx->cs_shader_state.emitted_program == program &&
+           sctx->cs_shader_state.offset == offset)
+               return true;
+
+       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+               config = &shader->config;
+       } else {
+               unsigned lds_blocks;
+
+               config = &inline_config;
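+               /* Native (OpenCL) binaries may contain several kernels; read
+                * the config of the kernel being launched from the ELF at the
+                * given offset.
+                */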
+               si_shader_binary_read_config(&shader->binary, config, offset);
+
+               lds_blocks = config->lds_size;
+               /* XXX: We are over allocating LDS.  For SI, the shader reports
+               * LDS in blocks of 256 bytes, so if there are 4 bytes lds
+               * allocated in the shader and 4 bytes allocated by the state
+               * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
+               */
+               if (sctx->b.chip_class <= SI) {
+                       lds_blocks += align(program->local_size, 256) >> 8;
+               } else {
+                       lds_blocks += align(program->local_size, 512) >> 9;
+               }
+
+               assert(lds_blocks <= 0xFF);
+
+               config->rsrc2 &= C_00B84C_LDS_SIZE;
+               config->rsrc2 |=  S_00B84C_LDS_SIZE(lds_blocks);
+       }
+
+       if (!si_setup_compute_scratch_buffer(sctx, shader, config))
+               return false;
+
+       if (shader->scratch_bo) {
+               COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
+                           "Total Scratch: %u bytes\n", sctx->scratch_waves,
+                           config->scratch_bytes_per_wave,
+                           config->scratch_bytes_per_wave *
+                           sctx->scratch_waves);
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                             shader->scratch_bo, RADEON_USAGE_READWRITE,
+                             RADEON_PRIO_SCRATCH_BUFFER);
+       }
+
+       shader_va = shader->bo->gpu_address + offset;
+
+       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+                                 RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
+
+       radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
+       radeon_emit(cs, shader_va >> 8);
+       radeon_emit(cs, shader_va >> 40);
+
+       radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
+       radeon_emit(cs, config->rsrc1);
+       radeon_emit(cs, config->rsrc2);
+
+       radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
+                 S_00B860_WAVES(sctx->scratch_waves)
+                    | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));
+
+       sctx->cs_shader_state.emitted_program = program;
+       sctx->cs_shader_state.offset = offset;
+       sctx->cs_shader_state.uses_scratch =
+               config->scratch_bytes_per_wave != 0;
+
+       return true;
+}
+
+static void si_upload_compute_input(struct si_context *sctx,
+                                  const struct pipe_grid_info *info)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        struct si_compute *program = sctx->cs_shader_state.program;
-       struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
-       struct r600_resource *input_buffer = program->input_buffer;
+       struct r600_resource *input_buffer = NULL;
        unsigned kernel_args_size;
        unsigned num_work_size_bytes = 36;
        uint32_t kernel_args_offset = 0;
        uint32_t *kernel_args;
+       void *kernel_args_ptr;
        uint64_t kernel_args_va;
-       uint64_t scratch_buffer_va = 0;
-       uint64_t shader_va;
-       unsigned arg_user_sgpr_count = NUM_USER_SGPRS;
        unsigned i;
-       struct si_shader *shader = &program->kernels[pc];
-       unsigned lds_blocks;
-       unsigned num_waves_for_scratch;
-
-       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0) | PKT3_SHADER_TYPE_S(1));
-       radeon_emit(cs, 0x80000000);
-       radeon_emit(cs, 0x80000000);
 
-       pm4->compute_pkt = true;
+       /* The extra num_work_size_bytes are for work group / work item size information */
+       kernel_args_size = program->input_size + num_work_size_bytes;
 
-       si_pm4_cmd_begin(pm4, PKT3_EVENT_WRITE);
-       si_pm4_cmd_add(pm4, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH) |
-                           EVENT_INDEX(0x7) |
-                           EVENT_WRITE_INV_L2);
-       si_pm4_cmd_end(pm4, false);
+       u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
+                      &kernel_args_offset,
+                      (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
 
-       si_pm4_inval_texture_cache(pm4);
-       si_pm4_inval_shader_cache(pm4);
-       si_cmd_surface_sync(pm4, pm4->cp_coher_cntl);
+       kernel_args = (uint32_t*)kernel_args_ptr;
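+       /* The first 9 dwords (36 bytes) hold the grid size, the global work
+        * size (grid * block) and the block size, one dword per dimension.
+        */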
+       for (i = 0; i < 3; i++) {
+               kernel_args[i] = info->grid[i];
+               kernel_args[i + 3] = info->grid[i] * info->block[i];
+               kernel_args[i + 6] = info->block[i];
+       }
 
-       /* Upload the kernel arguments */
+       memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
+              program->input_size);
 
-       /* The extra num_work_size_bytes are for work group / work item size information */
-       kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;
 
-       kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
-                       sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
-       for (i = 0; i < 3; i++) {
-               kernel_args[i] = grid_layout[i];
-               kernel_args[i + 3] = grid_layout[i] * block_layout[i];
-               kernel_args[i + 6] = block_layout[i];
+       for (i = 0; i < (kernel_args_size / 4); i++) {
+               COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
+                       kernel_args[i]);
        }
 
-       num_waves_for_scratch = compute_num_waves_for_scratch(
-               &sctx->screen->b.info, block_layout, grid_layout);
+       kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
 
-       memcpy(kernel_args + (num_work_size_bytes / 4), input, program->input_size);
+       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+                                 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 
-       if (shader->scratch_bytes_per_wave > 0) {
-               unsigned scratch_bytes = shader->scratch_bytes_per_wave *
-                                               num_waves_for_scratch;
+       radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
+       radeon_emit(cs, kernel_args_va);
+       radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
+                       S_008F04_STRIDE(0));
 
-               COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
-                           "Total Scratch: %u bytes\n", num_waves_for_scratch,
-                           shader->scratch_bytes_per_wave, scratch_bytes);
-               if (!shader->scratch_bo) {
-                       shader->scratch_bo = (struct r600_resource*)
-                               si_resource_create_custom(sctx->b.b.screen,
-                               PIPE_USAGE_DEFAULT, scratch_bytes);
+       pipe_resource_reference((struct pipe_resource**)&input_buffer, NULL);
+}
+
+static void si_setup_tgsi_grid(struct si_context *sctx,
+                                const struct pipe_grid_info *info)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
+                                 4 * SI_SGPR_GRID_SIZE;
+
+       if (info->indirect) {
+               uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+               uint64_t va = base_va + info->indirect_offset;
+               int i;
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                (struct r600_resource *)info->indirect,
+                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
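+               /* Copy each 32-bit grid dimension from the indirect buffer
+                * into the GRID_SIZE user SGPRs using CP COPY_DATA packets.
+                */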
+               for (i = 0; i < 3; ++i) {
+                       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+                       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+                                       COPY_DATA_DST_SEL(COPY_DATA_REG));
+                       radeon_emit(cs, (va +  4 * i));
+                       radeon_emit(cs, (va + 4 * i) >> 32);
+                       radeon_emit(cs, (grid_size_reg >> 2) + i);
+                       radeon_emit(cs, 0);
                }
-               scratch_buffer_va = shader->scratch_bo->gpu_address;
-               si_pm4_add_bo(pm4, shader->scratch_bo,
-                               RADEON_USAGE_READWRITE,
-                               RADEON_PRIO_SHADER_RESOURCE_RW);
+       } else {
 
+               radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+               radeon_emit(cs, info->grid[0]);
+               radeon_emit(cs, info->grid[1]);
+               radeon_emit(cs, info->grid[2]);
        }
+}
 
-       for (i = 0; i < (kernel_args_size / 4); i++) {
-               COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
-                       kernel_args[i]);
+static void si_emit_dispatch_packets(struct si_context *sctx,
+                                     const struct pipe_grid_info *info)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+
+       radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+       radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
+       radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
+       radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
+
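+       /* For indirect dispatches the CP reads the grid size from memory:
+        * program the base address with SET_BASE, then issue DISPATCH_INDIRECT
+        * with an offset relative to that base.
+        */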
+       if (info->indirect) {
+               uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                (struct r600_resource *)info->indirect,
+                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
+               radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, 1);
+               radeon_emit(cs, base_va);
+               radeon_emit(cs, base_va >> 32);
+
+               radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, info->indirect_offset);
+               radeon_emit(cs, 1);
+       } else {
+               radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, info->grid[0]);
+               radeon_emit(cs, info->grid[1]);
+               radeon_emit(cs, info->grid[2]);
+               radeon_emit(cs, 1);
        }
+}
+
+
+static void si_launch_grid(
+               struct pipe_context *ctx, const struct pipe_grid_info *info)
+{
+       struct si_context *sctx = (struct si_context*)ctx;
+       struct si_compute *program = sctx->cs_shader_state.program;
+       int i;
 
-       sctx->b.ws->buffer_unmap(input_buffer->cs_buf);
+       si_decompress_compute_textures(sctx);
 
-       kernel_args_va = input_buffer->gpu_address;
-       kernel_args_va += kernel_args_offset;
+       si_need_cs_space(sctx);
 
-       si_pm4_add_bo(pm4, input_buffer, RADEON_USAGE_READ,
-               RADEON_PRIO_SHADER_DATA);
+       if (!sctx->cs_shader_state.initialized)
+               si_initialize_compute(sctx);
 
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 4, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) | S_008F04_STRIDE(0));
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 8, scratch_buffer_va);
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 12,
-               S_008F04_BASE_ADDRESS_HI(scratch_buffer_va >> 32)
-               |  S_008F04_STRIDE(shader->scratch_bytes_per_wave / 64));
+       if (sctx->b.flags)
+               si_emit_cache_flush(sctx, NULL);
 
-       si_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
-       si_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
-       si_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
+       if (!si_switch_compute_shader(sctx, program, &program->shader, info->pc))
+               return;
 
-       si_pm4_set_reg(pm4, R_00B81C_COMPUTE_NUM_THREAD_X,
-                               S_00B81C_NUM_THREAD_FULL(block_layout[0]));
-       si_pm4_set_reg(pm4, R_00B820_COMPUTE_NUM_THREAD_Y,
-                               S_00B820_NUM_THREAD_FULL(block_layout[1]));
-       si_pm4_set_reg(pm4, R_00B824_COMPUTE_NUM_THREAD_Z,
-                               S_00B824_NUM_THREAD_FULL(block_layout[2]));
+       si_upload_compute_shader_descriptors(sctx);
+       si_emit_compute_shader_userdata(sctx);
+
+       if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
+               sctx->atoms.s.render_cond->emit(&sctx->b,
+                                               sctx->atoms.s.render_cond);
+               si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+       }
+
+       if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
+               si_upload_compute_input(sctx, info);
 
        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
@@ -276,138 +476,41 @@ static void si_launch_grid(
                if (!buffer) {
                        continue;
                }
-               si_pm4_add_bo(pm4, buffer, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+                                         RADEON_USAGE_READWRITE,
+                                         RADEON_PRIO_COMPUTE_GLOBAL);
        }
 
-       /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
-        * and is now per pipe, so it should be handled in the
-        * kernel if we want to use something other than the default value,
-        * which is now 0x22f.
-        */
-       if (sctx->b.chip_class <= SI) {
-               /* XXX: This should be:
-                * (number of compute units) * 4 * (waves per simd) - 1 */
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               si_setup_tgsi_grid(sctx, info);
 
-               si_pm4_set_reg(pm4, R_00B82C_COMPUTE_MAX_WAVE_ID,
-                                               0x190 /* Default value */);
-       }
+       si_ce_pre_draw_synchronization(sctx);
 
-       shader_va = shader->bo->gpu_address;
-       si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
-       si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, (shader_va >> 8) & 0xffffffff);
-       si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);
-
-       si_pm4_set_reg(pm4, R_00B848_COMPUTE_PGM_RSRC1,
-               /* We always use at least 3 VGPRS, these come from
-                * TIDIG_COMP_CNT.
-                * XXX: The compiler should account for this.
-                */
-               S_00B848_VGPRS((MAX2(3, shader->num_vgprs) - 1) / 4)
-               /* We always use at least 4 + arg_user_sgpr_count.  The 4 extra
-                * sgprs are from TGID_X_EN, TGID_Y_EN, TGID_Z_EN, TG_SIZE_EN
-                * XXX: The compiler should account for this.
-                */
-               |  S_00B848_SGPRS(((MAX2(4 + arg_user_sgpr_count,
-                                       shader->num_sgprs)) - 1) / 8))
-               ;
-
-       lds_blocks = shader->lds_size;
-       /* XXX: We are over allocating LDS.  For SI, the shader reports LDS in
-        * blocks of 256 bytes, so if there are 4 bytes lds allocated in
-        * the shader and 4 bytes allocated by the state tracker, then
-        * we will set LDS_SIZE to 512 bytes rather than 256.
-        */
-       if (sctx->b.chip_class <= SI) {
-               lds_blocks += align(program->local_size, 256) >> 8;
-       } else {
-               lds_blocks += align(program->local_size, 512) >> 9;
-       }
+       si_emit_dispatch_packets(sctx, info);
 
-       assert(lds_blocks <= 0xFF);
-
-       si_pm4_set_reg(pm4, R_00B84C_COMPUTE_PGM_RSRC2,
-               S_00B84C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0)
-               | S_00B84C_USER_SGPR(arg_user_sgpr_count)
-               | S_00B84C_TGID_X_EN(1)
-               | S_00B84C_TGID_Y_EN(1)
-               | S_00B84C_TGID_Z_EN(1)
-               | S_00B84C_TG_SIZE_EN(1)
-               | S_00B84C_TIDIG_COMP_CNT(2)
-               | S_00B84C_LDS_SIZE(lds_blocks)
-               | S_00B84C_EXCP_EN(0))
-               ;
-       si_pm4_set_reg(pm4, R_00B854_COMPUTE_RESOURCE_LIMITS, 0);
-
-       si_pm4_set_reg(pm4, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0,
-               S_00B858_SH0_CU_EN(0xffff /* Default value */)
-               | S_00B858_SH1_CU_EN(0xffff /* Default value */))
-               ;
-
-       si_pm4_set_reg(pm4, R_00B85C_COMPUTE_STATIC_THREAD_MGMT_SE1,
-               S_00B85C_SH0_CU_EN(0xffff /* Default value */)
-               | S_00B85C_SH1_CU_EN(0xffff /* Default value */))
-               ;
-
-       si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
-               /* The maximum value for WAVES is 32 * num CU.
-                * If you program this value incorrectly, the GPU will hang if
-                * COMPUTE_PGM_RSRC2.SCRATCH_EN is enabled.
-                */
-               S_00B860_WAVES(num_waves_for_scratch)
-               | S_00B860_WAVESIZE(shader->scratch_bytes_per_wave >> 10))
-               ;
-
-       si_pm4_cmd_begin(pm4, PKT3_DISPATCH_DIRECT);
-       si_pm4_cmd_add(pm4, grid_layout[0]); /* Thread groups DIM_X */
-       si_pm4_cmd_add(pm4, grid_layout[1]); /* Thread groups DIM_Y */
-       si_pm4_cmd_add(pm4, grid_layout[2]); /* Thread gropus DIM_Z */
-       si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
-        si_pm4_cmd_end(pm4, false);
-
-       si_pm4_cmd_begin(pm4, PKT3_EVENT_WRITE);
-       si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH | EVENT_INDEX(0x4)));
-       si_pm4_cmd_end(pm4, false);
-
-       si_pm4_inval_texture_cache(pm4);
-       si_pm4_inval_shader_cache(pm4);
-       si_cmd_surface_sync(pm4, pm4->cp_coher_cntl);
-
-       si_pm4_emit(sctx, pm4);
-
-#if 0
-       fprintf(stderr, "cdw: %i\n", sctx->cs->cdw);
-       for (i = 0; i < sctx->cs->cdw; i++) {
-               fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]);
-       }
-#endif
+       si_ce_post_draw_synchronization(sctx);
 
-       si_pm4_free_state(sctx, pm4, ~0);
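+       /* These counters back the new compute call / spilled compute call
+        * driver queries.
+        */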
+       sctx->b.num_compute_calls++;
+       if (sctx->cs_shader_state.uses_scratch)
+               sctx->b.num_spill_compute_calls++;
 }
 
 
 static void si_delete_compute_state(struct pipe_context *ctx, void* state){
        struct si_compute *program = (struct si_compute *)state;
+       struct si_context *sctx = (struct si_context*)ctx;
 
        if (!state) {
                return;
        }
 
-       if (program->kernels) {
-               for (int i = 0; i < program->num_kernels; i++){
-                       if (program->kernels[i].bo){
-                               si_shader_destroy(ctx, &program->kernels[i]);
-                       }
-               }
-               FREE(program->kernels);
-       }
+       if (program == sctx->cs_shader_state.program)
+               sctx->cs_shader_state.program = NULL;
 
-       if (program->llvm_ctx){
-               LLVMContextDispose(program->llvm_ctx);
-       }
-       pipe_resource_reference(
-               (struct pipe_resource **)&program->input_buffer, NULL);
+       if (program == sctx->cs_shader_state.emitted_program)
+               sctx->cs_shader_state.emitted_program = NULL;
 
-       //And then free the program itself.
+       si_shader_destroy(&program->shader);
        FREE(program);
 }