gallium: add AMD-specific compute TGSI enums
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 87addd53976e08a461bd44b3507a307523d381e5..22975069c999be515daf341c9d3031e9c8910a78 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
  *
  */
 
+#include "nir/tgsi_to_nir.h"
 #include "tgsi/tgsi_parse.h"
 #include "util/u_async_debug.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
 
+#include "ac_rtld.h"
 #include "amd_kernel_code_t.h"
 #include "si_build_pm4.h"
 #include "si_compute.h"
@@ -58,15 +60,39 @@ static const amd_kernel_code_t *si_compute_get_code_object(
        const struct si_compute *program,
        uint64_t symbol_offset)
 {
-       if (!program->use_code_object_v2) {
+       const struct si_shader_selector *sel = &program->sel;
+
+       if (program->ir_type != PIPE_SHADER_IR_NATIVE)
                return NULL;
-       }
-       return (const amd_kernel_code_t*)
-               (program->shader.binary.code + symbol_offset);
+
+       struct ac_rtld_binary rtld;
+       if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
+                       .info = &sel->screen->info,
+                       .shader_type = MESA_SHADER_COMPUTE,
+                       .wave_size = sel->screen->compute_wave_size,
+                       .num_parts = 1,
+                       .elf_ptrs = &program->shader.binary.elf_buffer,
+                       .elf_sizes = &program->shader.binary.elf_size }))
+               return NULL;
+
+       const amd_kernel_code_t *result = NULL;
+       const char *text;
+       size_t size;
+       if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size))
+               goto out;
+
+       if (symbol_offset + sizeof(amd_kernel_code_t) > size)
+               goto out;
+
+       result = (const amd_kernel_code_t*)(text + symbol_offset);
+
+out:
+       ac_rtld_close(&rtld);
+       return result;
 }
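
The rewritten lookup goes through ac_rtld: open the ELF, find .text, and bounds-check symbol_offset before casting. A minimal sketch of that validate-then-cast pattern, with a toy kernel_header standing in for amd_kernel_code_t (not driver code):

    #include <stddef.h>
    #include <stdint.h>

    struct kernel_header { uint64_t compute_pgm_resource_registers; };

    /* Return the header at 'offset' within a section, or NULL when the
     * header would run past the end -- the same check as above. */
    static const struct kernel_header *
    header_at(const char *text, size_t size, uint64_t offset)
    {
            if (offset + sizeof(struct kernel_header) > size)
                    return NULL;
            return (const struct kernel_header *)(text + offset);
    }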
 
 static void code_object_to_config(const amd_kernel_code_t *code_object,
-                                 struct si_shader_config *out_config) {
+                                 struct ac_shader_config *out_config) {
 
        uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
        uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
@@ -84,54 +110,44 @@ static void code_object_to_config(const amd_kernel_code_t *code_object,
 static void si_create_compute_state_async(void *job, int thread_index)
 {
        struct si_compute *program = (struct si_compute *)job;
+       struct si_shader_selector *sel = &program->sel;
        struct si_shader *shader = &program->shader;
-       struct si_shader_selector sel;
        struct ac_llvm_compiler *compiler;
-       struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
-       struct si_screen *sscreen = program->screen;
+       struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
+       struct si_screen *sscreen = sel->screen;
 
        assert(!debug->debug_message || debug->async);
        assert(thread_index >= 0);
        assert(thread_index < ARRAY_SIZE(sscreen->compiler));
        compiler = &sscreen->compiler[thread_index];
 
-       memset(&sel, 0, sizeof(sel));
-
-       sel.screen = sscreen;
-
        if (program->ir_type == PIPE_SHADER_IR_TGSI) {
-               tgsi_scan_shader(program->ir.tgsi, &sel.info);
-               sel.tokens = program->ir.tgsi;
+               tgsi_scan_shader(sel->tokens, &sel->info);
        } else {
                assert(program->ir_type == PIPE_SHADER_IR_NIR);
-               sel.nir = program->ir.nir;
 
-               si_nir_scan_shader(sel.nir, &sel.info);
-               si_lower_nir(&sel);
+               si_nir_opts(sel->nir);
+               si_nir_scan_shader(sel->nir, &sel->info);
+               si_lower_nir(sel, sscreen->compute_wave_size);
        }
 
        /* Store the declared LDS size into tgsi_shader_info for the shader
         * cache to include it.
         */
-       sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;
+       sel->info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size;
 
-       sel.type = PIPE_SHADER_COMPUTE;
-       si_get_active_slot_masks(&sel.info,
-                                &program->active_const_and_shader_buffers,
-                                &program->active_samplers_and_images);
+       si_get_active_slot_masks(&sel->info,
+                                &sel->active_const_and_shader_buffers,
+                                &sel->active_samplers_and_images);
 
-       program->shader.selector = &sel;
        program->shader.is_monolithic = true;
-       program->uses_grid_size = sel.info.uses_grid_size;
-       program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
-       program->uses_bindless_images = sel.info.uses_bindless_images;
        program->reads_variable_block_size =
-               sel.info.uses_block_size &&
-               sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
+               sel->info.uses_block_size &&
+               sel->info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
        program->num_cs_user_data_dwords =
-               sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS];
+               sel->info.properties[TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD];
 
-       void *ir_binary = si_get_ir_binary(&sel);
+       void *ir_binary = si_get_ir_binary(sel);
 
        /* Try to load the shader from the shader cache. */
        mtx_lock(&sscreen->shader_cache_mutex);
@@ -140,44 +156,49 @@ static void si_create_compute_state_async(void *job, int thread_index)
            si_shader_cache_load_shader(sscreen, ir_binary, shader)) {
                mtx_unlock(&sscreen->shader_cache_mutex);
 
-               si_shader_dump_stats_for_shader_db(shader, debug);
-               si_shader_dump(sscreen, shader, debug, PIPE_SHADER_COMPUTE,
-                              stderr, true);
+               si_shader_dump_stats_for_shader_db(sscreen, shader, debug);
+               si_shader_dump(sscreen, shader, debug, stderr, true);
 
-               if (si_shader_binary_upload(sscreen, shader))
+               if (!si_shader_binary_upload(sscreen, shader, 0))
                        program->shader.compilation_failed = true;
        } else {
                mtx_unlock(&sscreen->shader_cache_mutex);
 
-               if (si_shader_create(sscreen, compiler, &program->shader, debug)) {
+               if (!si_shader_create(sscreen, compiler, &program->shader, debug)) {
                        program->shader.compilation_failed = true;
 
                        if (program->ir_type == PIPE_SHADER_IR_TGSI)
-                               FREE(program->ir.tgsi);
-                       program->shader.selector = NULL;
+                               FREE(sel->tokens);
                        return;
                }
 
                bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
                unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
-                                     (sel.info.uses_grid_size ? 3 : 0) +
+                                     (sel->info.uses_grid_size ? 3 : 0) +
                                      (program->reads_variable_block_size ? 3 : 0) +
                                      program->num_cs_user_data_dwords;
 
                shader->config.rsrc1 =
-                       S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
-                       S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+                       S_00B848_VGPRS((shader->config.num_vgprs - 1) /
+                                      (sscreen->compute_wave_size == 32 ? 8 : 4)) |
                        S_00B848_DX10_CLAMP(1) |
+                       S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) |
+                       S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
                        S_00B848_FLOAT_MODE(shader->config.float_mode);
 
+               if (sscreen->info.chip_class < GFX10) {
+                       shader->config.rsrc1 |=
+                               S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
+               }
+
                shader->config.rsrc2 =
                        S_00B84C_USER_SGPR(user_sgprs) |
                        S_00B84C_SCRATCH_EN(scratch_enabled) |
-                       S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
-                       S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
-                       S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
-                       S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
-                                               sel.info.uses_thread_id[1] ? 1 : 0) |
+                       S_00B84C_TGID_X_EN(sel->info.uses_block_id[0]) |
+                       S_00B84C_TGID_Y_EN(sel->info.uses_block_id[1]) |
+                       S_00B84C_TGID_Z_EN(sel->info.uses_block_id[2]) |
+                       S_00B84C_TIDIG_COMP_CNT(sel->info.uses_thread_id[2] ? 2 :
+                                               sel->info.uses_thread_id[1] ? 1 : 0) |
                        S_00B84C_LDS_SIZE(shader->config.lds_size);
 
                if (ir_binary) {
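
RSRC1 encodes register counts in allocation granules: 4 VGPRs per granule in wave64, 8 in wave32 on gfx10 (hence the compute_wave_size test above), and 8 SGPRs per granule only before gfx10. A worked example with assumed counts num_vgprs = 24, num_sgprs = 30:

    /* wave64:  S_00B848_VGPRS((24 - 1) / 4) = 5 -> 6 granules * 4 = 24 VGPRs
     * wave32:  S_00B848_VGPRS((24 - 1) / 8) = 2 -> 3 granules * 8 = 24 VGPRs
     * gfx6-9:  S_00B848_SGPRS((30 - 1) / 8) = 3 -> 4 granules * 8 = 32 SGPRs */
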
@@ -189,9 +210,7 @@ static void si_create_compute_state_async(void *job, int thread_index)
        }
 
        if (program->ir_type == PIPE_SHADER_IR_TGSI)
-               FREE(program->ir.tgsi);
-
-       program->shader.selector = NULL;
+               FREE(sel->tokens);
 }
 
 static void *si_create_compute_state(
@@ -201,34 +220,40 @@ static void *si_create_compute_state(
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
+       struct si_shader_selector *sel = &program->sel;
 
-       pipe_reference_init(&program->reference, 1);
-       program->screen = (struct si_screen *)ctx->screen;
+       pipe_reference_init(&sel->reference, 1);
+       sel->type = PIPE_SHADER_COMPUTE;
+       sel->screen = sscreen;
+       program->shader.selector = &program->sel;
        program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
        program->input_size = cso->req_input_mem;
-       program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;
 
        if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
-               if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
-                       program->ir.tgsi = tgsi_dup_tokens(cso->prog);
-                       if (!program->ir.tgsi) {
+               if (sscreen->options.always_nir &&
+                   cso->ir_type == PIPE_SHADER_IR_TGSI) {
+                       program->ir_type = PIPE_SHADER_IR_NIR;
+                       sel->nir = tgsi_to_nir(cso->prog, ctx->screen);
+               } else if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+                       sel->tokens = tgsi_dup_tokens(cso->prog);
+                       if (!sel->tokens) {
                                FREE(program);
                                return NULL;
                        }
                } else {
                        assert(cso->ir_type == PIPE_SHADER_IR_NIR);
-                       program->ir.nir = (struct nir_shader *) cso->prog;
+                       sel->nir = (struct nir_shader *) cso->prog;
                }
 
-               program->compiler_ctx_state.debug = sctx->debug;
-               program->compiler_ctx_state.is_debug_context = sctx->is_debug;
+               sel->compiler_ctx_state.debug = sctx->debug;
+               sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
                p_atomic_inc(&sscreen->num_shaders_created);
 
                si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE,
-                                           &program->ready,
-                                           &program->compiler_ctx_state,
+                                           &sel->ready,
+                                           &sel->compiler_ctx_state,
                                            program, si_create_compute_state_async);
        } else {
                const struct pipe_llvm_program_header *header;
@@ -236,25 +261,22 @@ static void *si_create_compute_state(
                header = cso->prog;
                code = cso->prog + sizeof(struct pipe_llvm_program_header);
 
-               ac_elf_read(code, header->num_bytes, &program->shader.binary);
-               if (program->use_code_object_v2) {
-                       const amd_kernel_code_t *code_object =
-                               si_compute_get_code_object(program, 0);
-                       code_object_to_config(code_object, &program->shader.config);
-                       if (program->shader.binary.reloc_count != 0) {
-                               fprintf(stderr, "Error: %d unsupported relocations\n",
-                                       program->shader.binary.reloc_count);
-                               FREE(program);
-                               return NULL;
-                       }
-               } else {
-                       si_shader_binary_read_config(&program->shader.binary,
-                                    &program->shader.config, 0);
+               program->shader.binary.elf_size = header->num_bytes;
+               program->shader.binary.elf_buffer = malloc(header->num_bytes);
+               if (!program->shader.binary.elf_buffer) {
+                       FREE(program);
+                       return NULL;
                }
-               si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
-                              PIPE_SHADER_COMPUTE, stderr, true);
-               if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
+               memcpy((void *)program->shader.binary.elf_buffer, code, header->num_bytes);
+
+               const amd_kernel_code_t *code_object =
+                       si_compute_get_code_object(program, 0);
+               code_object_to_config(code_object, &program->shader.config);
+
+               si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true);
+               if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) {
                        fprintf(stderr, "LLVM failed to upload shader\n");
+                       free((void *)program->shader.binary.elf_buffer);
                        FREE(program);
                        return NULL;
                }
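
In the native path, cso->prog is a pipe_llvm_program_header immediately followed by the ELF image (the gallium header carries just a num_bytes field), so the config now comes from the embedded amd_kernel_code_t rather than si_shader_binary_read_config. A sketch of the blob layout being copied:

    /* cso->prog:
     *   +0               struct pipe_llvm_program_header  (num_bytes)
     *   +sizeof(header)  ELF image, header->num_bytes long
     * hence code = cso->prog + sizeof(struct pipe_llvm_program_header). */
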
@@ -267,6 +289,7 @@ static void si_bind_compute_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_compute *program = (struct si_compute*)state;
+       struct si_shader_selector *sel = &program->sel;
 
        sctx->cs_shader_state.program = program;
        if (!program)
@@ -274,16 +297,16 @@ static void si_bind_compute_state(struct pipe_context *ctx, void *state)
 
        /* Wait because we need active slot usage masks. */
        if (program->ir_type != PIPE_SHADER_IR_NATIVE)
-               util_queue_fence_wait(&program->ready);
+               util_queue_fence_wait(&sel->ready);
 
        si_set_active_descriptors(sctx,
                                  SI_DESCS_FIRST_COMPUTE +
                                  SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
-                                 program->active_const_and_shader_buffers);
+                                 sel->active_const_and_shader_buffers);
        si_set_active_descriptors(sctx,
                                  SI_DESCS_FIRST_COMPUTE +
                                  SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
-                                 program->active_samplers_and_images);
+                                 sel->active_samplers_and_images);
 }
 
 static void si_set_global_binding(
@@ -316,32 +339,35 @@ static void si_set_global_binding(
        }
 }
 
-static void si_initialize_compute(struct si_context *sctx)
+void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs)
 {
-       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        uint64_t bc_va;
 
        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
-       /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
+       /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1,
+        * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */
+       radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
-       radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
 
-       if (sctx->chip_class >= CIK) {
+       if (sctx->chip_class >= GFX7) {
                /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
                radeon_set_sh_reg_seq(cs,
                                     R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
-               radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
-                               S_00B864_SH1_CU_EN(0xffff));
-               radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
-                               S_00B868_SH1_CU_EN(0xffff));
+               radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
+                               S_00B858_SH1_CU_EN(0xffff));
+               radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) |
+                               S_00B858_SH1_CU_EN(0xffff));
        }
 
+       if (sctx->chip_class >= GFX10)
+               radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0);
+
        /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
         * and is now per pipe, so it should be handled in the
         * kernel if we want to use something other than the default value,
         * which is now 0x22f.
         */
-       if (sctx->chip_class <= SI) {
+       if (sctx->chip_class <= GFX6) {
                /* XXX: This should be:
                 * (number of compute units) * 4 * (waves per simd) - 1 */
 
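Reusing the S_00B858_* packers for every SE is deliberate: the per-SE COMPUTE_STATIC_THREAD_MGMT registers are consecutive and share one bit layout, and radeon_set_sh_reg_seq emits a single SET_SH_REG packet whose payload dwords land in consecutive registers. Sketch of the first packet:

    /* PKT3_SET_SH_REG, count = 2, base = COMPUTE_STATIC_THREAD_MGMT_SE0:
     *   dword 0 -> SE0: SH0_CU_EN in bits [15:0], SH1_CU_EN in bits [31:16]
     *   dword 1 -> SE1: same layout, so the SE0 field macros apply */
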
@@ -352,7 +378,7 @@ static void si_initialize_compute(struct si_context *sctx)
        /* Set the pointer to border colors. */
        bc_va = sctx->border_color_buffer->gpu_address;
 
-       if (sctx->chip_class >= CIK) {
+       if (sctx->chip_class >= GFX7) {
                radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
                radeon_emit(cs, bc_va >> 8);  /* R_030E00_TA_CS_BC_BASE_ADDR */
                radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
@@ -362,14 +388,11 @@ static void si_initialize_compute(struct si_context *sctx)
                                              bc_va >> 8);
                }
        }
-
-       sctx->cs_shader_state.emitted_program = NULL;
-       sctx->cs_shader_state.initialized = true;
 }
 
 static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                                             struct si_shader *shader,
-                                            struct si_shader_config *config)
+                                            struct ac_shader_config *config)
 {
        uint64_t scratch_bo_size, scratch_needed;
        scratch_bo_size = 0;
@@ -393,9 +416,7 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
        if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
                uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
 
-               si_shader_apply_scratch_relocs(shader, scratch_va);
-
-               if (si_shader_binary_upload(sctx->screen, shader))
+               if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
                        return false;
 
                si_resource_reference(&shader->scratch_bo,
@@ -412,8 +433,8 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                                     unsigned offset)
 {
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
-       struct si_shader_config inline_config = {0};
-       struct si_shader_config *config;
+       struct ac_shader_config inline_config = {0};
+       struct ac_shader_config *config;
        uint64_t shader_va;
 
        if (sctx->cs_shader_state.emitted_program == program &&
@@ -426,19 +447,15 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                unsigned lds_blocks;
 
                config = &inline_config;
-               if (code_object) {
-                       code_object_to_config(code_object, config);
-               } else {
-                       si_shader_binary_read_config(&shader->binary, config, offset);
-               }
+               code_object_to_config(code_object, config);
 
                lds_blocks = config->lds_size;
-               /* XXX: We are over allocating LDS.  For SI, the shader reports
+               /* XXX: We are over-allocating LDS.  For GFX6, the shader reports
                * LDS in blocks of 256 bytes, so if there are 4 bytes lds
                * allocated in the shader and 4 bytes allocated by the state
                * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
                */
-               if (sctx->chip_class <= SI) {
+               if (sctx->chip_class <= GFX6) {
                        lds_blocks += align(program->local_size, 256) >> 8;
                } else {
                        lds_blocks += align(program->local_size, 512) >> 9;
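
A worked instance of the double-rounding flagged in the XXX comment, for 4 bytes of shader LDS plus 4 bytes of API-declared LDS on GFX6 (256-byte granules):

    /* config->lds_size   = 1   -- shader's 4 bytes, already rounded up to 256 B
     * align(4, 256) >> 8 = 1   -- the API's 4 bytes, rounded up again
     * lds_blocks         = 2   -> LDS_SIZE = 512 B where 256 B would hold both */
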
@@ -473,13 +490,13 @@ static bool si_switch_compute_shader(struct si_context *sctx,
         * command. However, that would add more complexity and we're likely
         * to get a shader state change in that case anyway.
         */
-       if (sctx->chip_class >= CIK) {
+       if (sctx->chip_class >= GFX7) {
                cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
                                         0, program->shader.bo->b.b.width0);
        }
 
        shader_va = shader->bo->gpu_address + offset;
-       if (program->use_code_object_v2) {
+       if (program->ir_type == PIPE_SHADER_IR_NATIVE) {
                /* Shader code is placed after the amd_kernel_code_t
                 * struct. */
                shader_va += sizeof(amd_kernel_code_t);
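
This matches the layout the native path uploads: the amd_kernel_code_t header leads .text and the machine code follows, so the dispatch pointer must skip the header. Roughly:

    /* .text of an uploaded native kernel:
     *   bo->gpu_address + offset                  -> amd_kernel_code_t header
     *   bo->gpu_address + offset + sizeof(header) -> first instruction (shader_va) */
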
@@ -538,7 +555,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
        } else {
                scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
 
-               if (sctx->chip_class < VI) {
+               if (sctx->chip_class < GFX8) {
                        /* BUF_DATA_FORMAT is ignored, but it cannot be
                         * BUF_DATA_FORMAT_INVALID. */
                        scratch_dword3 |=
@@ -649,21 +666,14 @@ static bool si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
 {
-       struct radeon_cmdbuf *cs = sctx->gfx_cs;
        struct si_compute *program = sctx->cs_shader_state.program;
        struct si_resource *input_buffer = NULL;
-       unsigned kernel_args_size;
-       unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
        uint32_t kernel_args_offset = 0;
        uint32_t *kernel_args;
        void *kernel_args_ptr;
        uint64_t kernel_args_va;
-       unsigned i;
 
-       /* The extra num_work_size_bytes are for work group / work item size information */
-       kernel_args_size = program->input_size + num_work_size_bytes;
-
-       u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
+       u_upload_alloc(sctx->b.const_uploader, 0, program->input_size,
                       sctx->screen->info.tcc_cache_line_size,
                       &kernel_args_offset,
                       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
@@ -674,38 +684,18 @@ static bool si_upload_compute_input(struct si_context *sctx,
        kernel_args = (uint32_t*)kernel_args_ptr;
        kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
 
-       if (!code_object) {
-               for (i = 0; i < 3; i++) {
-                       kernel_args[i] = util_cpu_to_le32(info->grid[i]);
-                       kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
-                       kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
-               }
-       }
-
-       memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
-              program->input_size);
-
+       memcpy(kernel_args, info->input, program->input_size);
 
-       for (i = 0; i < (kernel_args_size / 4); i++) {
+       for (unsigned i = 0; i < program->input_size / 4; i++) {
                COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
                        kernel_args[i]);
        }
 
-
        radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
                                  RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 
-       if (code_object) {
-               si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
-       } else {
-               radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
-               radeon_emit(cs, kernel_args_va);
-               radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
-                               S_008F04_STRIDE(0));
-       }
-
+       si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
        si_resource_reference(&input_buffer, NULL);
-
        return true;
 }
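
With the 36-byte grid/block prologue gone (it only served the removed non-code-object path), the function now uploads exactly input_size bytes. The u_upload_alloc pattern it leans on, sketched with placeholder names (uploader, args, size, alignment):

    uint32_t offset;
    struct pipe_resource *buf = NULL;
    void *cpu_ptr = NULL;

    /* One call yields a GPU buffer slice plus a CPU mapping of it. */
    u_upload_alloc(uploader, 0, size, alignment, &offset, &buf, &cpu_ptr);
    if (cpu_ptr) {
            memcpy(cpu_ptr, args, size);  /* fill through the CPU mapping */
            /* the GPU reads the same bytes at
             * si_resource(buf)->gpu_address + offset */
    }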
 
@@ -713,37 +703,27 @@ static void si_setup_tgsi_user_data(struct si_context *sctx,
                                 const struct pipe_grid_info *info)
 {
        struct si_compute *program = sctx->cs_shader_state.program;
+       struct si_shader_selector *sel = &program->sel;
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
                                 4 * SI_NUM_RESOURCE_SGPRS;
        unsigned block_size_reg = grid_size_reg +
                                  /* 12 bytes = 3 dwords. */
-                                 12 * program->uses_grid_size;
+                                 12 * sel->info.uses_grid_size;
        unsigned cs_user_data_reg = block_size_reg +
                                    12 * program->reads_variable_block_size;
 
        if (info->indirect) {
-               if (program->uses_grid_size) {
-                       uint64_t base_va = si_resource(info->indirect)->gpu_address;
-                       uint64_t va = base_va + info->indirect_offset;
-                       int i;
-
-                       radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
-                                        si_resource(info->indirect),
-                                        RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
-
-                       for (i = 0; i < 3; ++i) {
-                               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
-                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
-                                               COPY_DATA_DST_SEL(COPY_DATA_REG));
-                               radeon_emit(cs, (va + 4 * i));
-                               radeon_emit(cs, (va + 4 * i) >> 32);
-                               radeon_emit(cs, (grid_size_reg >> 2) + i);
-                               radeon_emit(cs, 0);
+               if (sel->info.uses_grid_size) {
+                       for (unsigned i = 0; i < 3; ++i) {
+                               si_cp_copy_data(sctx, sctx->gfx_cs,
+                                               COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i,
+                                               COPY_DATA_SRC_MEM, si_resource(info->indirect),
+                                               info->indirect_offset + 4 * i);
                        }
                }
        } else {
-               if (program->uses_grid_size) {
+               if (sel->info.uses_grid_size) {
                        radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
                        radeon_emit(cs, info->grid[0]);
                        radeon_emit(cs, info->grid[1]);
@@ -769,42 +749,30 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
        struct si_screen *sscreen = sctx->screen;
        struct radeon_cmdbuf *cs = sctx->gfx_cs;
        bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
+       unsigned threads_per_threadgroup =
+               info->block[0] * info->block[1] * info->block[2];
        unsigned waves_per_threadgroup =
-               DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
-       unsigned compute_resource_limits =
-               S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
-
-       if (sctx->chip_class >= CIK) {
-               unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
-                                        sscreen->info.max_se;
-
-               /* Force even distribution on all SIMDs in CU if the workgroup
-                * size is 64. This has shown some good improvements if # of CUs
-                * per SE is not a multiple of 4.
-                */
-               if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
-                       compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
-
-               compute_resource_limits |= S_00B854_WAVES_PER_SH(sctx->cs_max_waves_per_sh);
-       } else {
-               /* SI */
-               if (sctx->cs_max_waves_per_sh) {
-                       unsigned limit_div16 = DIV_ROUND_UP(sctx->cs_max_waves_per_sh, 16);
-                       compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16);
-               }
-       }
+               DIV_ROUND_UP(threads_per_threadgroup, sscreen->compute_wave_size);
+       unsigned threadgroups_per_cu = 1;
+
+       if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
+               threadgroups_per_cu = 2;
 
        radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
-                         compute_resource_limits);
+                         ac_get_compute_resource_limits(&sscreen->info,
+                                                        waves_per_threadgroup,
+                                                        sctx->cs_max_waves_per_sh,
+                                                        threadgroups_per_cu));
 
        unsigned dispatch_initiator =
                S_00B800_COMPUTE_SHADER_EN(1) |
                S_00B800_FORCE_START_AT_000(1) |
                /* If the KMD allows it (there is a KMD hw register for it),
                 * allow launching waves out-of-order. (same as Vulkan) */
-               S_00B800_ORDER_MODE(sctx->chip_class >= CIK);
+               S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
+               S_00B800_CS_W32_EN(sscreen->compute_wave_size == 32);
 
-       uint *last_block = sctx->compute_last_block;
+       const uint *last_block = info->last_block;
        bool partial_block_en = last_block[0] || last_block[1] || last_block[2];
 
        radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
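
With compute_wave_size selectable on gfx10, the wave count follows from the flattened block size; for an 8x8x1 block (64 threads):

    /* wave64: DIV_ROUND_UP(64, 64) = 1 wave/group  -> gfx10 packs 2 groups per CU
     * wave32: DIV_ROUND_UP(64, 32) = 2 waves/group -> threadgroups_per_cu stays 1 */
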
@@ -871,10 +839,10 @@ static void si_launch_grid(
         * compute isn't used, i.e. only one compute job can run at a time.
         * If async compute is possible, the threadgroup size must be limited
         * to 256 threads on all queues to avoid the bug.
-        * Only SI and certain CIK chips are affected.
+        * Only GFX6 and certain GFX7 chips are affected.
         */
        bool cs_regalloc_hang =
-               (sctx->chip_class == SI ||
+               (sctx->chip_class == GFX6 ||
                 sctx->family == CHIP_BONAIRE ||
                 sctx->family == CHIP_KABINI) &&
                info->block[0] * info->block[1] * info->block[2] > 256;
@@ -904,20 +872,27 @@ static void si_launch_grid(
                si_context_add_resource_size(sctx, info->indirect);
 
                /* Indirect buffers use TC L2 on GFX9, but not older hw. */
-               if (sctx->chip_class <= VI &&
+               if (sctx->chip_class <= GFX8 &&
                    si_resource(info->indirect)->TC_L2_dirty) {
-                       sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+                       sctx->flags |= SI_CONTEXT_WB_L2;
                        si_resource(info->indirect)->TC_L2_dirty = false;
                }
        }
 
        si_need_gfx_cs_space(sctx);
 
-       if (!sctx->cs_shader_state.initialized)
-               si_initialize_compute(sctx);
+       if (sctx->bo_list_add_all_compute_resources)
+               si_compute_resources_add_all_to_bo_list(sctx);
+
+       if (!sctx->cs_shader_state.initialized) {
+               si_emit_initial_compute_regs(sctx, sctx->gfx_cs);
+
+               sctx->cs_shader_state.emitted_program = NULL;
+               sctx->cs_shader_state.initialized = true;
+       }
 
        if (sctx->flags)
-               si_emit_cache_flush(sctx);
+               sctx->emit_cache_flush(sctx);
 
        if (!si_switch_compute_shader(sctx, program, &program->shader,
                                        code_object, info->pc))
@@ -932,11 +907,9 @@ static void si_launch_grid(
                si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
        }
 
-       if ((program->input_size ||
-            program->ir_type == PIPE_SHADER_IR_NATIVE) &&
-           unlikely(!si_upload_compute_input(sctx, code_object, info))) {
+       if (program->ir_type == PIPE_SHADER_IR_NATIVE &&
+           unlikely(!si_upload_compute_input(sctx, code_object, info)))
                return;
-       }
 
        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
@@ -971,13 +944,16 @@ static void si_launch_grid(
 
 void si_destroy_compute(struct si_compute *program)
 {
+       struct si_shader_selector *sel = &program->sel;
+
        if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
-               util_queue_drop_job(&program->screen->shader_compiler_queue,
-                                   &program->ready);
-               util_queue_fence_destroy(&program->ready);
+               util_queue_drop_job(&sel->screen->shader_compiler_queue,
+                                   &sel->ready);
+               util_queue_fence_destroy(&sel->ready);
        }
 
        si_shader_destroy(&program->shader);
+       ralloc_free(program->sel.nir);
        FREE(program);
 }