radeonsi: use the DISPATCH packets to force COMPUTE_START_X/Y/Z = 0
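The DISPATCH_DIRECT and DISPATCH_INDIRECT packets can zero COMPUTE_START_X/Y/Z themselves via the FORCE_START_AT_000 bit in the dispatch initiator, so the driver no longer needs to program those registers explicitly.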
[mesa.git] / src / gallium / drivers / radeonsi / si_compute.c
index 4d27e86b4146da9a07b04e7ad0b978de5165cb52..91a6a406c6f178b86e57ce27cad4bbdb5106ef22 100644 (file)
  *
  */
 
+#include "tgsi/tgsi_parse.h"
 #include "util/u_memory.h"
-#include "radeon/r600_pipe_common.h"
-#include "radeon/radeon_elf_util.h"
-#include "radeon/radeon_llvm_util.h"
+#include "util/u_upload_mgr.h"
 
+#include "amd_kernel_code_t.h"
 #include "radeon/r600_cs.h"
 #include "si_pipe.h"
-#include "si_shader.h"
+#include "si_compute.h"
 #include "sid.h"
 
-#define MAX_GLOBAL_BUFFERS 20
-
-struct si_compute {
-       struct si_context *ctx;
-
-       unsigned local_size;
-       unsigned private_size;
-       unsigned input_size;
-       struct si_shader shader;
-       unsigned num_user_sgprs;
-
-       struct r600_resource *input_buffer;
-       struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-
-#if HAVE_LLVM < 0x0306
-       unsigned num_kernels;
-       struct si_shader *kernels;
-       LLVMContextRef llvm_ctx;
-#endif
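+/* Mirrors the HSA kernel dispatch packet (hsa_kernel_dispatch_packet_t)
+ * consumed by code object v2 kernels. */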
+struct dispatch_packet {
+       uint16_t header;
+       uint16_t setup;
+       uint16_t workgroup_size_x;
+       uint16_t workgroup_size_y;
+       uint16_t workgroup_size_z;
+       uint16_t reserved0;
+       uint32_t grid_size_x;
+       uint32_t grid_size_y;
+       uint32_t grid_size_z;
+       uint32_t private_segment_size;
+       uint32_t group_segment_size;
+       uint64_t kernel_object;
+       uint64_t kernarg_address;
+       uint64_t reserved2;
 };
 
-static void init_scratch_buffer(struct si_context *sctx, struct si_compute *program)
+static const amd_kernel_code_t *si_compute_get_code_object(
+       const struct si_compute *program,
+       uint64_t symbol_offset)
 {
-       unsigned scratch_bytes = 0;
-       uint64_t scratch_buffer_va;
-       unsigned i;
-
-       /* Compute the scratch buffer size using the maximum number of waves.
-        * This way we don't need to recompute it for each kernel launch. */
-       unsigned scratch_waves = 32 * sctx->screen->b.info.num_good_compute_units;
-       for (i = 0; i < program->shader.binary.global_symbol_count; i++) {
-               unsigned offset =
-                               program->shader.binary.global_symbol_offsets[i];
-               unsigned scratch_bytes_needed;
-
-               si_shader_binary_read_config(&program->shader.binary,
-                                            &program->shader.config, offset);
-               scratch_bytes_needed = program->shader.config.scratch_bytes_per_wave;
-               scratch_bytes = MAX2(scratch_bytes, scratch_bytes_needed);
+       if (!program->use_code_object_v2) {
+               return NULL;
        }
+       return (const amd_kernel_code_t*)
+               (program->shader.binary.code + symbol_offset);
+}
 
-       if (scratch_bytes == 0)
-               return;
-
-       program->shader.scratch_bo =
-                               si_resource_create_custom(sctx->b.b.screen,
-                               PIPE_USAGE_DEFAULT,
-                               scratch_bytes * scratch_waves);
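+/* Translate the amd_kernel_code_t metadata (register counts, resource
+ * registers, scratch size) into a si_shader_config. */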
+static void code_object_to_config(const amd_kernel_code_t *code_object,
+                                 struct si_shader_config *out_config)
+{
+       uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
+       uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
+       out_config->num_sgprs = code_object->wavefront_sgpr_count;
+       out_config->num_vgprs = code_object->workitem_vgpr_count;
+       out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
+       out_config->rsrc1 = rsrc1;
+       out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
+       out_config->rsrc2 = rsrc2;
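+       /* scratch_bytes_per_wave covers all 64 lanes of a wave;
+        * TMPRING_SIZE.WAVESIZE is programmed in 1024-byte units. */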
+       out_config->scratch_bytes_per_wave =
+               align(code_object->workitem_private_segment_byte_size * 64, 1024);
+}
 
-       scratch_buffer_va = program->shader.scratch_bo->gpu_address;
+/* Asynchronous compute shader compilation. */
+static void si_create_compute_state_async(void *job, int thread_index)
+{
+       struct si_compute *program = (struct si_compute *)job;
+       struct si_shader *shader = &program->shader;
+       struct si_shader_selector sel;
+       LLVMTargetMachineRef tm;
+       struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
+
+       if (thread_index >= 0) {
+               assert(thread_index < ARRAY_SIZE(program->screen->tm));
+               tm = program->screen->tm[thread_index];
+               if (!debug->async)
+                       debug = NULL;
+       } else {
+               tm = program->compiler_ctx_state.tm;
+       }
 
-       /* apply_scratch_relocs needs scratch_bytes_per_wave to be set
-        * to the maximum bytes needed, so it can compute the stride
-        * correctly.
-        */
-       program->shader.config.scratch_bytes_per_wave = scratch_bytes;
+       memset(&sel, 0, sizeof(sel));
+
+       sel.screen = program->screen;
+       tgsi_scan_shader(program->tokens, &sel.info);
+       sel.tokens = program->tokens;
+       sel.type = PIPE_SHADER_COMPUTE;
+       sel.local_size = program->local_size;
+       si_get_active_slot_masks(&sel.info,
+                                &program->active_const_and_shader_buffers,
+                                &program->active_samplers_and_images);
+
+       program->shader.selector = &sel;
+       program->shader.is_monolithic = true;
+       program->uses_grid_size = sel.info.uses_grid_size;
+       program->uses_block_size = sel.info.uses_block_size;
+       program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+       program->uses_bindless_images = sel.info.uses_bindless_images;
+
+       if (si_shader_create(program->screen, tm, &program->shader, debug)) {
+               program->shader.compilation_failed = true;
+       } else {
+               bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
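+               /* User SGPRs: resource descriptor pointers first, then an
+                * optional 3-dword grid size and 3-dword block size. */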
+               unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
+                                     (sel.info.uses_grid_size ? 3 : 0) +
+                                     (sel.info.uses_block_size ? 3 : 0);
+
+               shader->config.rsrc1 =
+                       S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
+                       S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+                       S_00B848_DX10_CLAMP(1) |
+                       S_00B848_FLOAT_MODE(shader->config.float_mode);
+
+               shader->config.rsrc2 =
+                       S_00B84C_USER_SGPR(user_sgprs) |
+                       S_00B84C_SCRATCH_EN(scratch_enabled) |
+                       S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
+                       S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
+                       S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
+                       S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
+                                               sel.info.uses_thread_id[1] ? 1 : 0) |
+                       S_00B84C_LDS_SIZE(shader->config.lds_size);
+
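+               /* A fixed block width of 0 means the block size is only
+                * known at launch time. */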
+               program->variable_group_size =
+                       sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
+       }
 
-       /* Patch the shader with the scratch buffer address. */
-       si_shader_apply_scratch_relocs(sctx,
-                               &program->shader, scratch_buffer_va);
+       FREE(program->tokens);
+       program->shader.selector = NULL;
 }
 
 static void *si_create_compute_state(
@@ -99,57 +148,61 @@ static void *si_create_compute_state(
        const struct pipe_compute_state *cso)
 {
        struct si_context *sctx = (struct si_context *)ctx;
+       struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
-       const struct pipe_llvm_program_header *header;
-       const char *code;
-
-       header = cso->prog;
-       code = cso->prog + sizeof(struct pipe_llvm_program_header);
 
-       program->ctx = sctx;
+       program->screen = sscreen;
+       program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
-
-#if HAVE_LLVM < 0x0306
-       {
-               unsigned i;
-               program->llvm_ctx = LLVMContextCreate();
-               program->num_kernels = radeon_llvm_get_num_kernels(program->llvm_ctx,
-                                       code, header->num_bytes);
-               program->kernels = CALLOC(sizeof(struct si_shader),
-                                                        program->num_kernels);
-               for (i = 0; i < program->num_kernels; i++) {
-                       LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i,
-                                                        code, header->num_bytes);
-                       si_compile_llvm(sctx->screen, &program->kernels[i].binary,
-                                       &program->kernels[i].config, sctx->tm,
-                                       mod, &sctx->b.debug, TGSI_PROCESSOR_COMPUTE,
-                                       "Compute Shader");
-                       si_shader_dump(sctx->screen, &program->kernels[i],
-                                      &sctx->b.debug, TGSI_PROCESSOR_COMPUTE);
-                       si_shader_binary_upload(sctx->screen, &program->kernels[i]);
-                       LLVMDisposeModule(mod);
+       program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
+                                       cso->ir_type == PIPE_SHADER_IR_NATIVE;
+
+       if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+               program->tokens = tgsi_dup_tokens(cso->prog);
+               if (!program->tokens) {
+                       FREE(program);
+                       return NULL;
                }
-       }
-#else
-
-       radeon_elf_read(code, header->num_bytes, &program->shader.binary);
 
-       /* init_scratch_buffer patches the shader code with the scratch address,
-        * so we need to call it before si_shader_binary_read() which uploads
-        * the shader code to the GPU.
-        */
-       init_scratch_buffer(sctx, program);
-       si_shader_binary_read_config(&program->shader.binary,
+               program->compiler_ctx_state.tm = sctx->tm;
+               program->compiler_ctx_state.debug = sctx->b.debug;
+               program->compiler_ctx_state.is_debug_context = sctx->is_debug;
+               p_atomic_inc(&sscreen->b.num_shaders_created);
+               util_queue_fence_init(&program->ready);
+
+               if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
+                   sctx->is_debug ||
+                   r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
+                       si_create_compute_state_async(program, -1);
+               else
+                       util_queue_add_job(&sscreen->shader_compiler_queue,
+                                          program, &program->ready,
+                                          si_create_compute_state_async, NULL);
+       } else {
+               const struct pipe_llvm_program_header *header;
+               const char *code;
+               header = cso->prog;
+               code = cso->prog + sizeof(struct pipe_llvm_program_header);
+
+               ac_elf_read(code, header->num_bytes, &program->shader.binary);
+               if (program->use_code_object_v2) {
+                       const amd_kernel_code_t *code_object =
+                               si_compute_get_code_object(program, 0);
+                       code_object_to_config(code_object, &program->shader.config);
+               } else {
+                       si_shader_binary_read_config(&program->shader.binary,
                                     &program->shader.config, 0);
-       si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
-                      TGSI_PROCESSOR_COMPUTE);
-       si_shader_binary_upload(sctx->screen, &program->shader);
-
-#endif
-       program->input_buffer = si_resource_create_custom(sctx->b.b.screen,
-               PIPE_USAGE_IMMUTABLE, program->input_size);
+               }
+               si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
+                              PIPE_SHADER_COMPUTE, stderr, true);
+               if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
+                       fprintf(stderr, "LLVM failed to upload shader\n");
+                       FREE(program);
+                       return NULL;
+               }
+       }
 
        return program;
 }
@@ -157,7 +210,24 @@ static void *si_create_compute_state(
 static void si_bind_compute_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context*)ctx;
-       sctx->cs_shader_state.program = (struct si_compute*)state;
+       struct si_compute *program = (struct si_compute*)state;
+
+       sctx->cs_shader_state.program = program;
+       if (!program)
+               return;
+
+       /* Wait because we need active slot usage masks. */
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               util_queue_fence_wait(&program->ready);
+
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
+                                 program->active_const_and_shader_buffers);
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
+                                 program->active_samplers_and_images);
 }
 
 static void si_set_global_binding(
@@ -169,17 +239,19 @@ static void si_set_global_binding(
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;
 
+       assert(first + n <= MAX_GLOBAL_BUFFERS);
+
        if (!resources) {
-               for (i = first; i < first + n; i++) {
-                       pipe_resource_reference(&program->global_buffers[i], NULL);
+               for (i = 0; i < n; i++) {
+                       pipe_resource_reference(&program->global_buffers[first + i], NULL);
                }
                return;
        }
 
-       for (i = first; i < first + n; i++) {
+       for (i = 0; i < n; i++) {
                uint64_t va;
                uint32_t offset;
-               pipe_resource_reference(&program->global_buffers[i], resources[i]);
+               pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
                va = r600_resource(resources[i])->gpu_address;
                offset = util_le32_to_cpu(*handles[i]);
                va += offset;
@@ -188,151 +260,567 @@ static void si_set_global_binding(
        }
 }
 
-/**
- * This function computes the value for R_00B860_COMPUTE_TMPRING_SIZE.WAVES
- * /p block_layout is the number of threads in each work group.
- * /p grid layout is the number of work groups.
- */
-static unsigned compute_num_waves_for_scratch(
-               const struct radeon_info *info,
-               const uint *block_layout,
-               const uint *grid_layout)
+static void si_initialize_compute(struct si_context *sctx)
 {
-       unsigned num_sh = MAX2(info->max_sh_per_se, 1);
-       unsigned num_se = MAX2(info->max_se, 1);
-       unsigned num_blocks = 1;
-       unsigned threads_per_block = 1;
-       unsigned waves_per_block;
-       unsigned waves_per_sh;
-       unsigned waves;
-       unsigned scratch_waves;
-       unsigned i;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       uint64_t bc_va;
+
+       radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
+       /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
+       radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
+       radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
+
+       if (sctx->b.chip_class >= CIK) {
+               /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
+               radeon_set_sh_reg_seq(cs,
+                                    R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
+               radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
+                               S_00B864_SH1_CU_EN(0xffff));
+               radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
+                               S_00B868_SH1_CU_EN(0xffff));
+       }
 
-       for (i = 0; i < 3; i++) {
-               threads_per_block *= block_layout[i];
-               num_blocks *= grid_layout[i];
+       /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
+        * and is now per pipe, so it should be handled in the
+        * kernel if we want to use something other than the default value,
+        * which is now 0x22f.
+        */
+       if (sctx->b.chip_class <= SI) {
+               /* XXX: This should be:
+                * (number of compute units) * 4 * (waves per simd) - 1 */
+
+               radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
+                                 0x190 /* Default value */);
        }
 
-       waves_per_block = align(threads_per_block, 64) / 64;
-       waves = waves_per_block * num_blocks;
-       waves_per_sh = align(waves, num_sh * num_se) / (num_sh * num_se);
-       scratch_waves = waves_per_sh * num_sh * num_se;
+       /* Set the pointer to border colors. */
+       bc_va = sctx->border_color_buffer->gpu_address;
 
-       if (waves_per_block > waves_per_sh) {
-               scratch_waves = waves_per_block * num_sh * num_se;
+       if (sctx->b.chip_class >= CIK) {
+               radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
+               radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
+               radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
+       } else {
+               if (sctx->screen->b.info.drm_major == 3 ||
+                   (sctx->screen->b.info.drm_major == 2 &&
+                    sctx->screen->b.info.drm_minor >= 48)) {
+                       radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
+                                             bc_va >> 8);
+               }
        }
 
-       return scratch_waves;
+       sctx->cs_shader_state.emitted_program = NULL;
+       sctx->cs_shader_state.initialized = true;
 }
 
-static void si_launch_grid(
-               struct pipe_context *ctx,
-               const uint *block_layout, const uint *grid_layout,
-               uint32_t pc, const void *input)
+static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
+                                            struct si_shader *shader,
+                                            struct si_shader_config *config)
+{
+       uint64_t scratch_bo_size, scratch_needed;
+       scratch_bo_size = 0;
+       scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
+       if (sctx->compute_scratch_buffer)
+               scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
+
+       if (scratch_bo_size < scratch_needed) {
+               r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
+
+               sctx->compute_scratch_buffer = (struct r600_resource*)
+                       r600_aligned_buffer_create(&sctx->screen->b.b,
+                                                  R600_RESOURCE_FLAG_UNMAPPABLE,
+                                                  PIPE_USAGE_DEFAULT,
+                                                  scratch_needed, 256);
+
+               if (!sctx->compute_scratch_buffer)
+                       return false;
+       }
+
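+       /* The scratch address is patched into the shader binary, so the
+        * binary must be re-uploaded whenever the scratch buffer changes. */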
+       if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
+               uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
+
+               si_shader_apply_scratch_relocs(shader, scratch_va);
+
+               if (si_shader_binary_upload(sctx->screen, shader))
+                       return false;
+
+               r600_resource_reference(&shader->scratch_bo,
+                                       sctx->compute_scratch_buffer);
+       }
+
+       return true;
+}
+
+static bool si_switch_compute_shader(struct si_context *sctx,
+                                     struct si_compute *program,
+                                    struct si_shader *shader,
+                                    const amd_kernel_code_t *code_object,
+                                    unsigned offset)
 {
-       struct si_context *sctx = (struct si_context*)ctx;
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
-       struct si_compute *program = sctx->cs_shader_state.program;
-       struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
-       struct r600_resource *input_buffer = program->input_buffer;
-       unsigned kernel_args_size;
-       unsigned num_work_size_bytes = 36;
-       uint32_t kernel_args_offset = 0;
-       uint32_t *kernel_args;
-       uint64_t kernel_args_va;
-       uint64_t scratch_buffer_va = 0;
+       struct si_shader_config inline_config = {0};
+       struct si_shader_config *config;
        uint64_t shader_va;
-       unsigned i;
-       struct si_shader *shader = &program->shader;
-       unsigned lds_blocks;
-       unsigned num_waves_for_scratch;
 
-#if HAVE_LLVM < 0x0306
-       shader = &program->kernels[pc];
-#endif
+       if (sctx->cs_shader_state.emitted_program == program &&
+           sctx->cs_shader_state.offset == offset)
+               return true;
 
+       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+               config = &shader->config;
+       } else {
+               unsigned lds_blocks;
 
-       radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0) | PKT3_SHADER_TYPE_S(1));
-       radeon_emit(cs, 0x80000000);
-       radeon_emit(cs, 0x80000000);
+               config = &inline_config;
+               if (code_object) {
+                       code_object_to_config(code_object, config);
+               } else {
+                       si_shader_binary_read_config(&shader->binary, config, offset);
+               }
 
-       sctx->b.flags |= SI_CONTEXT_INV_VMEM_L1 |
-                        SI_CONTEXT_INV_GLOBAL_L2 |
-                        SI_CONTEXT_INV_ICACHE |
-                        SI_CONTEXT_INV_SMEM_L1 |
-                        SI_CONTEXT_FLUSH_WITH_INV_L2 |
-                        SI_CONTEXT_FLAG_COMPUTE;
-       si_emit_cache_flush(sctx, NULL);
+               lds_blocks = config->lds_size;
+               /* XXX: We are over-allocating LDS. For SI, the shader reports
+                * LDS in blocks of 256 bytes, so if there are 4 bytes of LDS
+                * allocated in the shader and 4 bytes allocated by the state
+                * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
+                */
+               if (sctx->b.chip_class <= SI) {
+                       lds_blocks += align(program->local_size, 256) >> 8;
+               } else {
+                       lds_blocks += align(program->local_size, 512) >> 9;
+               }
 
-       pm4->compute_pkt = true;
+               /* TODO: use si_multiwave_lds_size_workaround */
+               assert(lds_blocks <= 0xFF);
 
-#if HAVE_LLVM >= 0x0306
-       /* Read the config information */
-       si_shader_binary_read_config(&shader->binary, &shader->config, pc);
-#endif
+               config->rsrc2 &= C_00B84C_LDS_SIZE;
+               config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
+       }
 
-       /* Upload the kernel arguments */
+       if (!si_setup_compute_scratch_buffer(sctx, shader, config))
+               return false;
 
-       /* The extra num_work_size_bytes are for work group / work item size information */
-       kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;
-
-       kernel_args = sctx->b.ws->buffer_map(input_buffer->buf,
-                       sctx->b.gfx.cs, PIPE_TRANSFER_WRITE);
-       for (i = 0; i < 3; i++) {
-               kernel_args[i] = grid_layout[i];
-               kernel_args[i + 3] = grid_layout[i] * block_layout[i];
-               kernel_args[i + 6] = block_layout[i];
+       if (shader->scratch_bo) {
+               COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
+                           "Total Scratch: %u bytes\n", sctx->scratch_waves,
+                           config->scratch_bytes_per_wave,
+                           config->scratch_bytes_per_wave *
+                           sctx->scratch_waves);
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                             shader->scratch_bo, RADEON_USAGE_READWRITE,
+                             RADEON_PRIO_SCRATCH_BUFFER);
        }
 
-       num_waves_for_scratch = compute_num_waves_for_scratch(
-               &sctx->screen->b.info, block_layout, grid_layout);
+       /* Prefetch the compute shader to TC L2.
+        *
+        * We should also prefetch graphics shaders if a compute dispatch was
+        * the last command, and the compute shader if a draw call was the last
+        * command. However, that would add more complexity and we're likely
+        * to get a shader state change in that case anyway.
+        */
+       if (sctx->b.chip_class >= CIK) {
+               cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
+                                        0, program->shader.bo->b.b.width0);
+       }
 
-       memcpy(kernel_args + (num_work_size_bytes / 4), input, program->input_size);
+       shader_va = shader->bo->gpu_address + offset;
+       if (program->use_code_object_v2) {
+               /* Shader code is placed after the amd_kernel_code_t
+                * struct. */
+               shader_va += sizeof(amd_kernel_code_t);
+       }
 
-       if (shader->config.scratch_bytes_per_wave > 0) {
+       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+                                 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
 
-               COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
-                           "Total Scratch: %u bytes\n", num_waves_for_scratch,
-                           shader->config.scratch_bytes_per_wave,
-                           shader->config.scratch_bytes_per_wave *
-                           num_waves_for_scratch);
+       radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
+       radeon_emit(cs, shader_va >> 8);
+       radeon_emit(cs, shader_va >> 40);
 
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                         shader->scratch_bo,
-                                         RADEON_USAGE_READWRITE,
-                                         RADEON_PRIO_SCRATCH_BUFFER);
+       radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
+       radeon_emit(cs, config->rsrc1);
+       radeon_emit(cs, config->rsrc2);
+
+       COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
+               "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);
+
+       radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
+                         S_00B860_WAVES(sctx->scratch_waves) |
+                         S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));
 
-               scratch_buffer_va = shader->scratch_bo->gpu_address;
+       sctx->cs_shader_state.emitted_program = program;
+       sctx->cs_shader_state.offset = offset;
+       sctx->cs_shader_state.uses_scratch =
+               config->scratch_bytes_per_wave != 0;
+
+       return true;
+}
+
+static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
+                                         const amd_kernel_code_t *code_object,
+                                         unsigned user_sgpr)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
+
+       unsigned max_private_element_size = AMD_HSA_BITS_GET(
+                       code_object->code_properties,
+                       AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);
+
+       uint32_t scratch_dword0 = scratch_va & 0xffffffff;
+       uint32_t scratch_dword1 =
+               S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
+               S_008F04_SWIZZLE_ENABLE(1);
+
+       /* Disable address clamping */
+       uint32_t scratch_dword2 = 0xffffffff;
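+       /* Swizzled addressing: INDEX_STRIDE=3 selects 64 records (one per
+        * lane of a wave) and ADD_TID_ENABLE gives each lane its own slice. */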
+       uint32_t scratch_dword3 =
+               S_008F0C_INDEX_STRIDE(3) |
+               S_008F0C_ADD_TID_ENABLE(1);
+
+       if (sctx->b.chip_class >= GFX9) {
+               assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
+       } else {
+               scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
+
+               if (sctx->b.chip_class < VI) {
+                       /* BUF_DATA_FORMAT is ignored, but it cannot be
+                        * BUF_DATA_FORMAT_INVALID. */
+                       scratch_dword3 |=
+                               S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+               }
        }
 
+       radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+                                                       (user_sgpr * 4), 4);
+       radeon_emit(cs, scratch_dword0);
+       radeon_emit(cs, scratch_dword1);
+       radeon_emit(cs, scratch_dword2);
+       radeon_emit(cs, scratch_dword3);
+}
+
+static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
+                                      const amd_kernel_code_t *code_object,
+                                     const struct pipe_grid_info *info,
+                                     uint64_t kernel_args_va)
+{
+       struct si_compute *program = sctx->cs_shader_state.program;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
+       static const enum amd_code_property_mask_t workgroup_count_masks[] = {
+               AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
+               AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
+               AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
+       };
+
+       unsigned i, user_sgpr = 0;
+       if (AMD_HSA_BITS_GET(code_object->code_properties,
+                       AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
+               if (code_object->workitem_private_segment_byte_size > 0) {
+                       setup_scratch_rsrc_user_sgprs(sctx, code_object,
+                                                               user_sgpr);
+               }
+               user_sgpr += 4;
+       }
+
+       if (AMD_HSA_BITS_GET(code_object->code_properties,
+                       AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
+               struct dispatch_packet dispatch;
+               unsigned dispatch_offset;
+               struct r600_resource *dispatch_buf = NULL;
+               uint64_t dispatch_va;
+
+               /* Upload dispatch ptr */
+               memset(&dispatch, 0, sizeof(dispatch));
+
+               dispatch.workgroup_size_x = info->block[0];
+               dispatch.workgroup_size_y = info->block[1];
+               dispatch.workgroup_size_z = info->block[2];
+
+               dispatch.grid_size_x = info->grid[0] * info->block[0];
+               dispatch.grid_size_y = info->grid[1] * info->block[1];
+               dispatch.grid_size_z = info->grid[2] * info->block[2];
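+               /* The HSA packet counts the grid in work-items, while
+                * pipe_grid_info counts workgroups, hence the multiply. */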
+
+               dispatch.private_segment_size = program->private_size;
+               dispatch.group_segment_size = program->local_size;
+
+               dispatch.kernarg_address = kernel_args_va;
+
+               u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+                              256, &dispatch, &dispatch_offset,
+                              (struct pipe_resource**)&dispatch_buf);
+
+               if (!dispatch_buf) {
+                       fprintf(stderr, "Error: Failed to allocate dispatch "
+                                       "packet.\n");
+                       return;
+               }
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+                                 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
+
+               dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
+
+               radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+                                                       (user_sgpr * 4), 2);
+               radeon_emit(cs, dispatch_va);
+               radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
+                                S_008F04_STRIDE(0));
+
+               r600_resource_reference(&dispatch_buf, NULL);
+               user_sgpr += 2;
+       }
+
+       if (AMD_HSA_BITS_GET(code_object->code_properties,
+                       AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
+               radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+                                                       (user_sgpr * 4), 2);
+               radeon_emit(cs, kernel_args_va);
+               radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
+                               S_008F04_STRIDE(0));
+               user_sgpr += 2;
+       }
+
+       for (i = 0; i < 3 && user_sgpr < 16; i++) {
+               if (code_object->code_properties & workgroup_count_masks[i]) {
+                       radeon_set_sh_reg_seq(cs,
+                               R_00B900_COMPUTE_USER_DATA_0 +
+                               (user_sgpr * 4), 1);
+                       radeon_emit(cs, info->grid[i]);
+                       user_sgpr += 1;
+               }
+       }
+}
+
+static bool si_upload_compute_input(struct si_context *sctx,
+                                   const amd_kernel_code_t *code_object,
+                                   const struct pipe_grid_info *info)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       struct si_compute *program = sctx->cs_shader_state.program;
+       struct r600_resource *input_buffer = NULL;
+       unsigned kernel_args_size;
+       unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
+       uint32_t kernel_args_offset = 0;
+       uint32_t *kernel_args;
+       void *kernel_args_ptr;
+       uint64_t kernel_args_va;
+       unsigned i;
+
+       /* The extra num_work_size_bytes are for work group / work item size information */
+       kernel_args_size = program->input_size + num_work_size_bytes;
+
+       u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
+                      sctx->screen->b.info.tcc_cache_line_size,
+                      &kernel_args_offset,
+                      (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
+
+       if (unlikely(!kernel_args_ptr))
+               return false;
+
+       kernel_args = (uint32_t*)kernel_args_ptr;
+       kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
+
+       if (!code_object) {
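+               /* Legacy (non code-object-v2) ABI: 9 dwords of grid size,
+                * global size, and block size precede the kernel args. */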
+               for (i = 0; i < 3; i++) {
+                       kernel_args[i] = info->grid[i];
+                       kernel_args[i + 3] = info->grid[i] * info->block[i];
+                       kernel_args[i + 6] = info->block[i];
+               }
+       }
+
+       memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
+              program->input_size);
+
        for (i = 0; i < (kernel_args_size / 4); i++) {
                COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
                        kernel_args[i]);
        }
 
-       kernel_args_va = input_buffer->gpu_address;
-       kernel_args_va += kernel_args_offset;
 
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 4, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) | S_008F04_STRIDE(0));
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 8, scratch_buffer_va);
-       si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 12,
-               S_008F04_BASE_ADDRESS_HI(scratch_buffer_va >> 32)
-               |  S_008F04_STRIDE(shader->config.scratch_bytes_per_wave / 64));
+       if (code_object) {
+               si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
+       } else {
+               radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
+               radeon_emit(cs, kernel_args_va);
+               radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(kernel_args_va >> 32) |
+                               S_008F04_STRIDE(0));
+       }
+
+       r600_resource_reference(&input_buffer, NULL);
+
+       return true;
+}
+
+static void si_setup_tgsi_grid(struct si_context *sctx,
+                                const struct pipe_grid_info *info)
+{
+       struct si_compute *program = sctx->cs_shader_state.program;
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
+                                4 * SI_NUM_RESOURCE_SGPRS;
+       unsigned block_size_reg = grid_size_reg +
+                                 /* 12 bytes = 3 dwords. */
+                                 12 * program->uses_grid_size;
+
+       if (info->indirect) {
+               if (program->uses_grid_size) {
+                       uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+                       uint64_t va = base_va + info->indirect_offset;
+                       int i;
+
+                       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                        (struct r600_resource *)info->indirect,
+                                        RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
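+                       /* Copy the three grid dimensions from the indirect
+                        * buffer into the grid size user SGPRs. */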
+                       for (i = 0; i < 3; ++i) {
+                               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+                                               COPY_DATA_DST_SEL(COPY_DATA_REG));
+                               radeon_emit(cs, (va + 4 * i));
+                               radeon_emit(cs, (va + 4 * i) >> 32);
+                               radeon_emit(cs, (grid_size_reg >> 2) + i);
+                               radeon_emit(cs, 0);
+                       }
+               }
+       } else {
+               if (program->uses_grid_size) {
+                       radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+                       radeon_emit(cs, info->grid[0]);
+                       radeon_emit(cs, info->grid[1]);
+                       radeon_emit(cs, info->grid[2]);
+               }
+               if (program->variable_group_size && program->uses_block_size) {
+                       radeon_set_sh_reg_seq(cs, block_size_reg, 3);
+                       radeon_emit(cs, info->block[0]);
+                       radeon_emit(cs, info->block[1]);
+                       radeon_emit(cs, info->block[2]);
+               }
+       }
+}
+
+static void si_emit_dispatch_packets(struct si_context *sctx,
+                                     const struct pipe_grid_info *info)
+{
+       struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+       bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+       unsigned waves_per_threadgroup =
+               DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
+
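+       /* A wave is 64 threads. Setting SIMD_DEST_CNTL when the threadgroup
+        * is a whole multiple of 4 waves spreads them evenly across the
+        * SIMDs of a CU. */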
+       radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+                         S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));
+
+       radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+       radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
+       radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
+       radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
 
-       si_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
-       si_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
-       si_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
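+       /* FORCE_START_AT_000 makes the CP zero COMPUTE_START_X/Y/Z as part
+        * of the dispatch, so the registers need not be set explicitly. */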
+       unsigned dispatch_initiator =
+               S_00B800_COMPUTE_SHADER_EN(1) |
+               S_00B800_FORCE_START_AT_000(1);
 
-       si_pm4_set_reg(pm4, R_00B81C_COMPUTE_NUM_THREAD_X,
-                               S_00B81C_NUM_THREAD_FULL(block_layout[0]));
-       si_pm4_set_reg(pm4, R_00B820_COMPUTE_NUM_THREAD_Y,
-                               S_00B820_NUM_THREAD_FULL(block_layout[1]));
-       si_pm4_set_reg(pm4, R_00B824_COMPUTE_NUM_THREAD_Z,
-                               S_00B824_NUM_THREAD_FULL(block_layout[2]));
+       if (info->indirect) {
+               uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+
+               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                (struct r600_resource *)info->indirect,
+                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
+               radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, 1);
+               radeon_emit(cs, base_va);
+               radeon_emit(cs, base_va >> 32);
+
+               radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, info->indirect_offset);
+               radeon_emit(cs, dispatch_initiator);
+       } else {
+               radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
+                               PKT3_SHADER_TYPE_S(1));
+               radeon_emit(cs, info->grid[0]);
+               radeon_emit(cs, info->grid[1]);
+               radeon_emit(cs, info->grid[2]);
+               radeon_emit(cs, dispatch_initiator);
+       }
+}
+
+static void si_launch_grid(
+               struct pipe_context *ctx, const struct pipe_grid_info *info)
+{
+       struct si_context *sctx = (struct si_context*)ctx;
+       struct si_compute *program = sctx->cs_shader_state.program;
+       const amd_kernel_code_t *code_object =
+               si_compute_get_code_object(program, info->pc);
+       int i;
+       /* HW bug workaround when CS threadgroups > 256 threads and async
+        * compute isn't used, i.e. only one compute job can run at a time.
+        * If async compute is possible, the threadgroup size must be limited
+        * to 256 threads on all queues to avoid the bug.
+        * Only SI and certain CIK chips are affected.
+        */
+       bool cs_regalloc_hang =
+               (sctx->b.chip_class == SI ||
+                sctx->b.family == CHIP_BONAIRE ||
+                sctx->b.family == CHIP_KABINI) &&
+               info->block[0] * info->block[1] * info->block[2] > 256;
+
+       if (cs_regalloc_hang)
+               sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+                                SI_CONTEXT_CS_PARTIAL_FLUSH;
+
+       if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+           program->shader.compilation_failed)
+               return;
+
+       si_decompress_compute_textures(sctx);
+
+       /* Add buffer sizes for memory checking in need_cs_space. */
+       r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
+       /* TODO: add the scratch buffer */
+
+       if (info->indirect) {
+               r600_context_add_resource_size(ctx, info->indirect);
+
+               /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+               if (sctx->b.chip_class <= VI &&
+                   r600_resource(info->indirect)->TC_L2_dirty) {
+                       sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+                       r600_resource(info->indirect)->TC_L2_dirty = false;
+               }
+       }
+
+       si_need_cs_space(sctx);
+
+       if (!sctx->cs_shader_state.initialized)
+               si_initialize_compute(sctx);
+
+       if (sctx->b.flags)
+               si_emit_cache_flush(sctx);
+
+       if (!si_switch_compute_shader(sctx, program, &program->shader,
+                                       code_object, info->pc))
+               return;
+
+       si_upload_compute_shader_descriptors(sctx);
+       si_emit_compute_shader_userdata(sctx);
+
+       if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
+               sctx->atoms.s.render_cond->emit(&sctx->b,
+                                               sctx->atoms.s.render_cond);
+               si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+       }
+
+       if ((program->input_size ||
+            program->ir_type == PIPE_SHADER_IR_NATIVE) &&
+           unlikely(!si_upload_compute_input(sctx, code_object, info))) {
+               return;
+       }
 
        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
@@ -346,128 +834,46 @@ static void si_launch_grid(
                                          RADEON_PRIO_COMPUTE_GLOBAL);
        }
 
-       /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
-        * and is now per pipe, so it should be handled in the
-        * kernel if we want to use something other than the default value,
-        * which is now 0x22f.
-        */
-       if (sctx->b.chip_class <= SI) {
-               /* XXX: This should be:
-                * (number of compute units) * 4 * (waves per simd) - 1 */
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               si_setup_tgsi_grid(sctx, info);
 
-               si_pm4_set_reg(pm4, R_00B82C_COMPUTE_MAX_WAVE_ID,
-                                               0x190 /* Default value */);
-       }
+       si_ce_pre_draw_synchronization(sctx);
 
-       shader_va = shader->bo->gpu_address;
+       si_emit_dispatch_packets(sctx, info);
 
-#if HAVE_LLVM >= 0x0306
-       shader_va += pc;
-#endif
-       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
-                                 RADEON_USAGE_READ, RADEON_PRIO_USER_SHADER);
-       si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
-       si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);
+       si_ce_post_draw_synchronization(sctx);
 
-       si_pm4_set_reg(pm4, R_00B848_COMPUTE_PGM_RSRC1, shader->config.rsrc1);
+       sctx->compute_is_busy = true;
+       sctx->b.num_compute_calls++;
+       if (sctx->cs_shader_state.uses_scratch)
+               sctx->b.num_spill_compute_calls++;
 
-       lds_blocks = shader->config.lds_size;
-       /* XXX: We are over allocating LDS.  For SI, the shader reports LDS in
-        * blocks of 256 bytes, so if there are 4 bytes lds allocated in
-        * the shader and 4 bytes allocated by the state tracker, then
-        * we will set LDS_SIZE to 512 bytes rather than 256.
-        */
-       if (sctx->b.chip_class <= SI) {
-               lds_blocks += align(program->local_size, 256) >> 8;
-       } else {
-               lds_blocks += align(program->local_size, 512) >> 9;
-       }
-
-       assert(lds_blocks <= 0xFF);
-
-       shader->config.rsrc2 &= C_00B84C_LDS_SIZE;
-       shader->config.rsrc2 |=  S_00B84C_LDS_SIZE(lds_blocks);
-
-       si_pm4_set_reg(pm4, R_00B84C_COMPUTE_PGM_RSRC2, shader->config.rsrc2);
-       si_pm4_set_reg(pm4, R_00B854_COMPUTE_RESOURCE_LIMITS, 0);
-
-       si_pm4_set_reg(pm4, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0,
-               S_00B858_SH0_CU_EN(0xffff /* Default value */)
-               | S_00B858_SH1_CU_EN(0xffff /* Default value */))
-               ;
-
-       si_pm4_set_reg(pm4, R_00B85C_COMPUTE_STATIC_THREAD_MGMT_SE1,
-               S_00B85C_SH0_CU_EN(0xffff /* Default value */)
-               | S_00B85C_SH1_CU_EN(0xffff /* Default value */))
-               ;
-
-       num_waves_for_scratch =
-               MIN2(num_waves_for_scratch,
-                    32 * sctx->screen->b.info.num_good_compute_units);
-       si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
-               /* The maximum value for WAVES is 32 * num CU.
-                * If you program this value incorrectly, the GPU will hang if
-                * COMPUTE_PGM_RSRC2.SCRATCH_EN is enabled.
-                */
-               S_00B860_WAVES(num_waves_for_scratch)
-               | S_00B860_WAVESIZE(shader->config.scratch_bytes_per_wave >> 10))
-               ;
-
-       si_pm4_cmd_begin(pm4, PKT3_DISPATCH_DIRECT);
-       si_pm4_cmd_add(pm4, grid_layout[0]); /* Thread groups DIM_X */
-       si_pm4_cmd_add(pm4, grid_layout[1]); /* Thread groups DIM_Y */
-       si_pm4_cmd_add(pm4, grid_layout[2]); /* Thread gropus DIM_Z */
-       si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
-        si_pm4_cmd_end(pm4, false);
-
-       si_pm4_emit(sctx, pm4);
-
-#if 0
-       fprintf(stderr, "cdw: %i\n", sctx->cs->cdw);
-       for (i = 0; i < sctx->cs->cdw; i++) {
-               fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]);
-       }
-#endif
-
-       si_pm4_free_state(sctx, pm4, ~0);
-
-       sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
-                        SI_CONTEXT_INV_VMEM_L1 |
-                        SI_CONTEXT_INV_GLOBAL_L2 |
-                        SI_CONTEXT_INV_ICACHE |
-                        SI_CONTEXT_INV_SMEM_L1 |
-                        SI_CONTEXT_FLAG_COMPUTE;
-       si_emit_cache_flush(sctx, NULL);
+       if (cs_regalloc_hang)
+               sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
 }
 
 
 static void si_delete_compute_state(struct pipe_context *ctx, void* state){
        struct si_compute *program = (struct si_compute *)state;
+       struct si_context *sctx = (struct si_context*)ctx;
 
        if (!state) {
                return;
        }
 
-#if HAVE_LLVM < 0x0306
-       if (program->kernels) {
-               for (int i = 0; i < program->num_kernels; i++){
-                       if (program->kernels[i].bo){
-                               si_shader_destroy(&program->kernels[i]);
-                       }
-               }
-               FREE(program->kernels);
+       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+               util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+                                   &program->ready);
+               util_queue_fence_destroy(&program->ready);
        }
 
-       if (program->llvm_ctx){
-               LLVMContextDispose(program->llvm_ctx);
-       }
-#else
-       si_shader_destroy(&program->shader);
-#endif
+       if (program == sctx->cs_shader_state.program)
+               sctx->cs_shader_state.program = NULL;
 
-       pipe_resource_reference(
-               (struct pipe_resource **)&program->input_buffer, NULL);
+       if (program == sctx->cs_shader_state.emitted_program)
+               sctx->cs_shader_state.emitted_program = NULL;
 
+       si_shader_destroy(&program->shader);
        FREE(program);
 }