radeonsi: set COMPUTE_DISPATCH_INITIATOR.ORDER_MODE = 1
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index 9d83cb3a7361891593843e973023824c5f37c6a9..fba02faeb5a79f8b03ffb8b6a79e373e63d5d0c4 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
 #include "tgsi/tgsi_parse.h"
 #include "util/u_memory.h"
 #include "util/u_upload_mgr.h"
-#include "radeon/radeon_elf_util.h"
 
 #include "amd_kernel_code_t.h"
 #include "radeon/r600_cs.h"
 #include "si_pipe.h"
+#include "si_compute.h"
 #include "sid.h"
 
-#define MAX_GLOBAL_BUFFERS 20
-
-struct si_compute {
-       unsigned ir_type;
-       unsigned local_size;
-       unsigned private_size;
-       unsigned input_size;
-       struct si_shader shader;
-
-       struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-       unsigned use_code_object_v2 : 1;
-       unsigned variable_group_size : 1;
-};
-
 struct dispatch_packet {
        uint16_t header;
        uint16_t setup;
@@ -89,6 +75,74 @@ static void code_object_to_config(const amd_kernel_code_t *code_object,
                align(code_object->workitem_private_segment_byte_size * 64, 1024);
 }
 
+/* Asynchronous compute shader compilation. */
+static void si_create_compute_state_async(void *job, int thread_index)
+{
+       struct si_compute *program = (struct si_compute *)job;
+       struct si_shader *shader = &program->shader;
+       struct si_shader_selector sel;
+       LLVMTargetMachineRef tm;
+       struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
+
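+       /* A non-negative thread_index means this job runs on one of the
+        * compiler queue's threads and must use that thread's LLVM target
+        * machine; -1 means the caller invoked us synchronously. */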
+       if (thread_index >= 0) {
+               assert(thread_index < ARRAY_SIZE(program->screen->tm));
+               tm = program->screen->tm[thread_index];
+               if (!debug->async)
+                       debug = NULL;
+       } else {
+               tm = program->compiler_ctx_state.tm;
+       }
+
+       memset(&sel, 0, sizeof(sel));
+
+       sel.screen = program->screen;
+       tgsi_scan_shader(program->tokens, &sel.info);
+       sel.tokens = program->tokens;
+       sel.type = PIPE_SHADER_COMPUTE;
+       sel.local_size = program->local_size;
+       si_get_active_slot_masks(&sel.info,
+                                &program->active_const_and_shader_buffers,
+                                &program->active_samplers_and_images);
+
+       program->shader.selector = &sel;
+       program->shader.is_monolithic = true;
+       program->uses_grid_size = sel.info.uses_grid_size;
+       program->uses_block_size = sel.info.uses_block_size;
+       program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+       program->uses_bindless_images = sel.info.uses_bindless_images;
+
+       if (si_shader_create(program->screen, tm, &program->shader, debug)) {
+               program->shader.compilation_failed = true;
+       } else {
+               bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
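+               /* User SGPRs: resource descriptors come first, followed by
+                * the optional 3-dword grid size and 3-dword block size
+                * (see si_setup_tgsi_grid). */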
+               unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
+                                     (sel.info.uses_grid_size ? 3 : 0) +
+                                     (sel.info.uses_block_size ? 3 : 0);
+
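+               /* The hardware allocates VGPRs in units of 4 and SGPRs in
+                * units of 8, hence the (num - 1) / 4 and (num - 1) / 8
+                * encodings. */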
+               shader->config.rsrc1 =
+                       S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
+                       S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+                       S_00B848_DX10_CLAMP(1) |
+                       S_00B848_FLOAT_MODE(shader->config.float_mode);
+
+               shader->config.rsrc2 =
+                       S_00B84C_USER_SGPR(user_sgprs) |
+                       S_00B84C_SCRATCH_EN(scratch_enabled) |
+                       S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
+                       S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
+                       S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
+                       S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
+                                               sel.info.uses_thread_id[1] ? 1 : 0) |
+                       S_00B84C_LDS_SIZE(shader->config.lds_size);
+
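+               /* A CS_FIXED_BLOCK_WIDTH of 0 means the block size is only
+                * known at dispatch time and must be passed in user SGPRs. */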
+               program->variable_group_size =
+                       sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
+       }
+
+       FREE(program->tokens);
+       program->shader.selector = NULL;
+}
+
 static void *si_create_compute_state(
        struct pipe_context *ctx,
        const struct pipe_compute_state *cso)
@@ -96,9 +150,8 @@ static void *si_create_compute_state(
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_screen *sscreen = (struct si_screen *)ctx->screen;
        struct si_compute *program = CALLOC_STRUCT(si_compute);
-       struct si_shader *shader = &program->shader;
-
 
+       program->screen = (struct si_screen *)ctx->screen;
        program->ir_type = cso->ir_type;
        program->local_size = cso->req_local_mem;
        program->private_size = cso->req_private_mem;
@@ -106,61 +159,34 @@ static void *si_create_compute_state(
        program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
                                        cso->ir_type == PIPE_SHADER_IR_NATIVE;
 
-
        if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
-               struct si_shader_selector sel;
-               bool scratch_enabled;
-
-               memset(&sel, 0, sizeof(sel));
-
-               sel.tokens = tgsi_dup_tokens(cso->prog);
-               if (!sel.tokens) {
+               program->tokens = tgsi_dup_tokens(cso->prog);
+               if (!program->tokens) {
                        FREE(program);
                        return NULL;
                }
 
-               tgsi_scan_shader(cso->prog, &sel.info);
-               sel.type = PIPE_SHADER_COMPUTE;
-               sel.local_size = cso->req_local_mem;
-
+               program->compiler_ctx_state.tm = sctx->tm;
+               program->compiler_ctx_state.debug = sctx->b.debug;
+               program->compiler_ctx_state.is_debug_context = sctx->is_debug;
                p_atomic_inc(&sscreen->b.num_shaders_created);
-
-               program->shader.selector = &sel;
-               program->shader.is_monolithic = true;
-
-               if (si_shader_create(sscreen, sctx->tm, &program->shader,
-                                    &sctx->b.debug)) {
-                       FREE(sel.tokens);
-                       FREE(program);
-                       return NULL;
-               }
-
-               scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
-
-               shader->config.rsrc1 =
-                          S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
-                          S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
-                          S_00B848_DX10_CLAMP(1) |
-                          S_00B848_FLOAT_MODE(shader->config.float_mode);
-
-               shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
-                          S_00B84C_SCRATCH_EN(scratch_enabled) |
-                          S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
-                          S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
-                          S_00B84C_LDS_SIZE(shader->config.lds_size);
-
-               program->variable_group_size =
-                       sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
-
-               FREE(sel.tokens);
-               program->shader.selector = NULL;
+               util_queue_fence_init(&program->ready);
+
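+               /* Compile synchronously when the debug callback can't be
+                * called from another thread, for debug contexts, and when
+                * shader dumping is enabled. */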
+               if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
+                   sctx->is_debug ||
+                   r600_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
+                       si_create_compute_state_async(program, -1);
+               else
+                       util_queue_add_job(&sscreen->shader_compiler_queue,
+                                          program, &program->ready,
+                                          si_create_compute_state_async, NULL);
        } else {
                const struct pipe_llvm_program_header *header;
                const char *code;
                header = cso->prog;
                code = cso->prog + sizeof(struct pipe_llvm_program_header);
 
-               radeon_elf_read(code, header->num_bytes, &program->shader.binary);
+               ac_elf_read(code, header->num_bytes, &program->shader.binary);
                if (program->use_code_object_v2) {
                        const amd_kernel_code_t *code_object =
                                si_compute_get_code_object(program, 0);
@@ -170,7 +196,7 @@ static void *si_create_compute_state(
                                     &program->shader.config, 0);
                }
                si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
-                              PIPE_SHADER_COMPUTE, stderr);
+                              PIPE_SHADER_COMPUTE, stderr, true);
                if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
                        fprintf(stderr, "LLVM failed to upload shader\n");
                        FREE(program);
@@ -184,7 +210,24 @@ static void *si_create_compute_state(
 static void si_bind_compute_state(struct pipe_context *ctx, void *state)
 {
        struct si_context *sctx = (struct si_context*)ctx;
-       sctx->cs_shader_state.program = (struct si_compute*)state;
+       struct si_compute *program = (struct si_compute*)state;
+
+       sctx->cs_shader_state.program = program;
+       if (!program)
+               return;
+
+       /* Wait because we need active slot usage masks. */
+       if (program->ir_type == PIPE_SHADER_IR_TGSI)
+               util_queue_fence_wait(&program->ready);
+
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
+                                 program->active_const_and_shader_buffers);
+       si_set_active_descriptors(sctx,
+                                 SI_DESCS_FIRST_COMPUTE +
+                                 SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
+                                 program->active_samplers_and_images);
 }
 
 static void si_set_global_binding(
@@ -196,17 +239,19 @@ static void si_set_global_binding(
        struct si_context *sctx = (struct si_context*)ctx;
        struct si_compute *program = sctx->cs_shader_state.program;
 
+       assert(first + n <= MAX_GLOBAL_BUFFERS);
+
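+       /* Each handle holds a little-endian byte offset into its buffer;
+        * binding translates it into an absolute GPU address below. */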
        if (!resources) {
-               for (i = first; i < first + n; i++) {
-                       pipe_resource_reference(&program->global_buffers[i], NULL);
+               for (i = 0; i < n; i++) {
+                       pipe_resource_reference(&program->global_buffers[first + i], NULL);
                }
                return;
        }
 
-       for (i = first; i < first + n; i++) {
+       for (i = 0; i < n; i++) {
                uint64_t va;
                uint32_t offset;
-               pipe_resource_reference(&program->global_buffers[i], resources[i]);
+               pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
                va = r600_resource(resources[i])->gpu_address;
                offset = util_le32_to_cpu(*handles[i]);
                va += offset;
@@ -220,11 +265,6 @@ static void si_initialize_compute(struct si_context *sctx)
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        uint64_t bc_va;
 
-       radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-       radeon_emit(cs, 0);
-
        radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
        /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
        radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
@@ -287,8 +327,10 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
                r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
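+               /* The CPU never maps the scratch buffer, so an unmappable
+                * (VRAM-only) placement is preferable; 256 bytes matches the
+                * base alignment expected for shader resources. */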
 
                sctx->compute_scratch_buffer = (struct r600_resource*)
-                       pipe_buffer_create(&sctx->screen->b.b, 0,
-                                          PIPE_USAGE_DEFAULT, scratch_needed);
+                       r600_aligned_buffer_create(&sctx->screen->b.b,
+                                                  R600_RESOURCE_FLAG_UNMAPPABLE,
+                                                  PIPE_USAGE_DEFAULT,
+                                                  scratch_needed, 256);
 
                if (!sctx->compute_scratch_buffer)
                        return false;
@@ -297,7 +339,7 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
        if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
                uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
 
-               si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);
+               si_shader_apply_scratch_relocs(shader, scratch_va);
 
                if (si_shader_binary_upload(sctx->screen, shader))
                        return false;
@@ -370,6 +412,18 @@ static bool si_switch_compute_shader(struct si_context *sctx,
                              RADEON_PRIO_SCRATCH_BUFFER);
        }
 
+       /* Prefetch the compute shader to TC L2.
+        *
+        * We should also prefetch graphics shaders if a compute dispatch was
+        * the last command, and the compute shader if a draw call was the last
+        * command. However, that would add more complexity and we're likely
+        * to get a shader state change in that case anyway.
+        */
+       if (sctx->b.chip_class >= CIK) {
+               cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
+                                        0, program->shader.bo->b.b.width0);
+       }
+
        shader_va = shader->bo->gpu_address + offset;
        if (program->use_code_object_v2) {
                /* Shader code is placed after the amd_kernel_code_t
@@ -422,16 +476,20 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
        /* Disable address clamping */
        uint32_t scratch_dword2 = 0xffffffff;
        uint32_t scratch_dword3 =
-               S_008F0C_ELEMENT_SIZE(max_private_element_size) |
                S_008F0C_INDEX_STRIDE(3) |
                S_008F0C_ADD_TID_ENABLE(1);
 
+       if (sctx->b.chip_class >= GFX9) {
+               assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
+       } else {
+               scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
 
-       if (sctx->screen->b.chip_class < VI) {
-               /* BUF_DATA_FORMAT is ignored, but it cannot be
-                  BUF_DATA_FORMAT_INVALID. */
-               scratch_dword3 |=
-                       S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+               if (sctx->b.chip_class < VI) {
+                       /* BUF_DATA_FORMAT is ignored, but it cannot be
+                        * BUF_DATA_FORMAT_INVALID. */
+                       scratch_dword3 |=
+                               S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+               }
        }
 
        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
@@ -489,9 +547,9 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
 
                dispatch.kernarg_address = kernel_args_va;
 
-               u_upload_data(sctx->b.uploader, 0, sizeof(dispatch), 256,
-                               &dispatch, &dispatch_offset,
-                               (struct pipe_resource**)&dispatch_buf);
+               u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+                              256, &dispatch, &dispatch_offset,
+                              (struct pipe_resource**)&dispatch_buf);
 
                if (!dispatch_buf) {
                        fprintf(stderr, "Error: Failed to allocate dispatch "
@@ -533,7 +591,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
        }
 }
 
-static void si_upload_compute_input(struct si_context *sctx,
+static bool si_upload_compute_input(struct si_context *sctx,
                                    const amd_kernel_code_t *code_object,
                                    const struct pipe_grid_info *info)
 {
@@ -551,10 +609,14 @@ static void si_upload_compute_input(struct si_context *sctx,
        /* The extra num_work_size_bytes are for work group / work item size information */
        kernel_args_size = program->input_size + num_work_size_bytes;
 
-       u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
+       u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
+                      sctx->screen->b.info.tcc_cache_line_size,
                       &kernel_args_offset,
                       (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
 
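+       /* u_upload_alloc can fail under memory pressure; bail out instead
+        * of dereferencing a NULL map. */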
+       if (unlikely(!kernel_args_ptr))
+               return false;
+
        kernel_args = (uint32_t*)kernel_args_ptr;
        kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
 
@@ -589,41 +651,50 @@ static void si_upload_compute_input(struct si_context *sctx,
        }
 
        r600_resource_reference(&input_buffer, NULL);
+
+       return true;
 }
 
 static void si_setup_tgsi_grid(struct si_context *sctx,
                                 const struct pipe_grid_info *info)
 {
+       struct si_compute *program = sctx->cs_shader_state.program;
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
-                                 4 * SI_SGPR_GRID_SIZE;
+                                4 * SI_NUM_RESOURCE_SGPRS;
+       unsigned block_size_reg = grid_size_reg +
+                                 /* 12 bytes = 3 dwords. */
+                                 12 * program->uses_grid_size;
 
        if (info->indirect) {
-               uint64_t base_va = r600_resource(info->indirect)->gpu_address;
-               uint64_t va = base_va + info->indirect_offset;
-               int i;
-
-               radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
-                                (struct r600_resource *)info->indirect,
-                                RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
-
-               for (i = 0; i < 3; ++i) {
-                       radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
-                       radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
-                                       COPY_DATA_DST_SEL(COPY_DATA_REG));
-                       radeon_emit(cs, (va +  4 * i));
-                       radeon_emit(cs, (va + 4 * i) >> 32);
-                       radeon_emit(cs, (grid_size_reg >> 2) + i);
-                       radeon_emit(cs, 0);
+               if (program->uses_grid_size) {
+                       uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+                       uint64_t va = base_va + info->indirect_offset;
+                       int i;
+
+                       radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+                                        (struct r600_resource *)info->indirect,
+                                        RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
+                       for (i = 0; i < 3; ++i) {
+                               radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+                               radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+                                               COPY_DATA_DST_SEL(COPY_DATA_REG));
+                               radeon_emit(cs, (va + 4 * i));
+                               radeon_emit(cs, (va + 4 * i) >> 32);
+                               radeon_emit(cs, (grid_size_reg >> 2) + i);
+                               radeon_emit(cs, 0);
+                       }
                }
        } else {
-               struct si_compute *program = sctx->cs_shader_state.program;
-
-               radeon_set_sh_reg_seq(cs, grid_size_reg, program->variable_group_size ? 6 : 3);
-               radeon_emit(cs, info->grid[0]);
-               radeon_emit(cs, info->grid[1]);
-               radeon_emit(cs, info->grid[2]);
-               if (program->variable_group_size) {
+               if (program->uses_grid_size) {
+                       radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+                       radeon_emit(cs, info->grid[0]);
+                       radeon_emit(cs, info->grid[1]);
+                       radeon_emit(cs, info->grid[2]);
+               }
+               if (program->variable_group_size && program->uses_block_size) {
+                       radeon_set_sh_reg_seq(cs, block_size_reg, 3);
                        radeon_emit(cs, info->block[0]);
                        radeon_emit(cs, info->block[1]);
                        radeon_emit(cs, info->block[2]);
@@ -647,6 +718,13 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
        radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
        radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
 
+       unsigned dispatch_initiator =
+               S_00B800_COMPUTE_SHADER_EN(1) |
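+               /* Forcing the start coordinates to (0,0,0) makes the
+                * COMPUTE_START_* registers irrelevant, which is why
+                * si_initialize_compute no longer programs them. */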
+               S_00B800_FORCE_START_AT_000(1) |
+               /* If the KMD allows it (there is a KMD hw register for it),
+                * allow launching waves out-of-order. (same as Vulkan) */
+               S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+
        if (info->indirect) {
                uint64_t base_va = r600_resource(info->indirect)->gpu_address;
 
@@ -663,14 +741,14 @@ static void si_emit_dispatch_packets(struct si_context *sctx,
                radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->indirect_offset);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        } else {
                radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
                                PKT3_SHADER_TYPE_S(1));
                radeon_emit(cs, info->grid[0]);
                radeon_emit(cs, info->grid[1]);
                radeon_emit(cs, info->grid[2]);
-               radeon_emit(cs, 1);
+               radeon_emit(cs, dispatch_initiator);
        }
 }
 
@@ -699,6 +777,10 @@ static void si_launch_grid(
                sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
                                 SI_CONTEXT_CS_PARTIAL_FLUSH;
 
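+       /* The asynchronous compile may have failed; skip the dispatch
+        * rather than launching an invalid shader. */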
+       if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+           program->shader.compilation_failed)
+               return;
+
        si_decompress_compute_textures(sctx);
 
        /* Add buffer sizes for memory checking in need_cs_space. */
@@ -708,8 +790,9 @@ static void si_launch_grid(
        if (info->indirect) {
                r600_context_add_resource_size(ctx, info->indirect);
 
-               /* The hw doesn't read the indirect buffer via TC L2. */
-               if (r600_resource(info->indirect)->TC_L2_dirty) {
+               /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+               if (sctx->b.chip_class <= VI &&
+                   r600_resource(info->indirect)->TC_L2_dirty) {
                        sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
                        r600_resource(info->indirect)->TC_L2_dirty = false;
                }
@@ -736,8 +819,11 @@ static void si_launch_grid(
                si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
        }
 
-       if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
-               si_upload_compute_input(sctx, code_object, info);
+       if ((program->input_size ||
+            program->ir_type == PIPE_SHADER_IR_NATIVE) &&
+           unlikely(!si_upload_compute_input(sctx, code_object, info))) {
+               return;
+       }
 
        /* Global buffers */
        for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
@@ -778,6 +864,12 @@ static void si_delete_compute_state(struct pipe_context *ctx, void* state){
                return;
        }
 
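+       /* Drop the compile job if it is still queued (util_queue_drop_job
+        * waits instead if the job already started) before destroying the
+        * fence. */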
+       if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+               util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+                                   &program->ready);
+               util_queue_fence_destroy(&program->ready);
+       }
+
        if (program == sctx->cs_shader_state.program)
                sctx->cs_shader_state.program = NULL;