sel.tokens = program->tokens;
sel.type = PIPE_SHADER_COMPUTE;
sel.local_size = program->local_size;
+ si_get_active_slot_masks(&sel.info,
+ &program->active_const_and_shader_buffers,
+ &program->active_samplers_and_images);
program->shader.selector = &sel;
program->shader.is_monolithic = true;
program->uses_grid_size = sel.info.uses_grid_size;
program->uses_block_size = sel.info.uses_block_size;
+ program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+ program->uses_bindless_images = sel.info.uses_bindless_images;
if (si_shader_create(program->screen, tm, &program->shader, debug)) {
program->shader.compilation_failed = true;
}
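
/* For context, a minimal sketch (not the patch's exact code) of what a
 * helper like si_get_active_slot_masks() can derive from the TGSI info.
 * The slot layout and the "sketch_" name are assumptions for illustration. */
static void sketch_get_active_slot_masks(const struct tgsi_shader_info *info,
					 uint64_t *const_and_shader_buffers,
					 uint32_t *samplers_and_images)
{
	unsigned num_shaderbufs = util_last_bit(info->shader_buffers_declared);
	unsigned num_constbufs = util_last_bit(info->const_buffers_declared);
	unsigned num_samplers = util_last_bit(info->samplers_declared);
	unsigned num_images = util_last_bit(info->images_declared);

	/* Assume shader buffers and constant buffers share one descriptor
	 * array and images and samplers share the other; mark every declared
	 * slot as active so unused slots can be skipped later. */
	*const_and_shader_buffers =
		u_bit_consecutive64(0, num_shaderbufs + num_constbufs);
	*samplers_and_images =
		u_bit_consecutive(0, num_images + num_samplers);
}
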
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context*)ctx;
- sctx->cs_shader_state.program = (struct si_compute*)state;
+ struct si_compute *program = (struct si_compute*)state;
+
+ sctx->cs_shader_state.program = program;
+ if (!program)
+ return;
+
+ /* Wait because we need active slot usage masks. */
+ if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ util_queue_fence_wait(&program->ready);
+
+ si_set_active_descriptors(sctx,
+ SI_DESCS_FIRST_COMPUTE +
+ SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
+ program->active_const_and_shader_buffers);
+ si_set_active_descriptors(sctx,
+ SI_DESCS_FIRST_COMPUTE +
+ SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
+ program->active_samplers_and_images);
}
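
/* A sketch of the range update si_set_active_descriptors() implies,
 * assuming si_descriptors tracks a first_active_slot/num_active_slots
 * window (field names are assumptions, not taken from the patch): */
static void sketch_set_active_descriptors(struct si_context *sctx,
					  unsigned desc_idx,
					  uint64_t new_active_mask)
{
	struct si_descriptors *descs = &sctx->descriptors[desc_idx];
	int first, count;

	if (!new_active_mask)
		return;

	/* The mask is expected to be one consecutive range of slots. */
	u_bit_scan_consecutive_range64(&new_active_mask, &first, &count);

	/* Re-upload the table only if the active window grew. */
	if (first < descs->first_active_slot ||
	    first + count > descs->first_active_slot + descs->num_active_slots)
		sctx->descriptors_dirty |= 1u << desc_idx;

	descs->first_active_slot = first;
	descs->num_active_slots = count;
}
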
static void si_set_global_binding(
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
uint64_t bc_va;
- radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
- radeon_emit(cs, 0);
- radeon_emit(cs, 0);
- radeon_emit(cs, 0);
-
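/* The COMPUTE_START_X/Y/Z writes above are dropped: FORCE_START_AT_000 in
 * the dispatch initiator (set below) makes the hw start at (0,0,0) anyway. */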
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1: enable all CUs. */
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
+ unsigned dispatch_initiator =
+ S_00B800_COMPUTE_SHADER_EN(1) |
+ S_00B800_FORCE_START_AT_000(1) |
+ /* If the KMD allows it (there is a KMD hw register for it),
+ * allow launching waves out-of-order. (same as Vulkan) */
+ S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;

/* Program the indirect-args base address (base index 1); DISPATCH_INDIRECT
 * then only carries the dword offset relative to this base. */
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) | PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, 1);
radeon_emit(cs, base_va);
radeon_emit(cs, base_va >> 32);

radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, info->indirect_offset);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
} else {
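/* Note: the PKT3 count field is the number of body dwords minus one:
 * 3 here (grid xyz + initiator), 1 above (offset + initiator). */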
radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, info->grid[0]);
radeon_emit(cs, info->grid[1]);
radeon_emit(cs, info->grid[2]);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
}
}
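
/* Usage sketch (gallium API, not part of the patch): a state tracker fills
 * pipe_grid_info and launch_grid ends up emitting the packets above. */
struct pipe_grid_info info = {0};
info.block[0] = 8;  info.block[1] = 8;  info.block[2] = 1;
info.grid[0] = 64;  info.grid[1] = 64;  info.grid[2] = 1;
ctx->launch_grid(ctx, &info);	/* ctx is a struct pipe_context * */
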
sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH;
- if (program->ir_type == PIPE_SHADER_IR_TGSI) {
- util_queue_fence_wait(&program->ready);
-
- if (program->shader.compilation_failed)
- return;
- }
+ if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+ program->shader.compilation_failed)
+ return;
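
/* For context, a hedged sketch of the async TGSI compile this relies on
 * (util/u_queue.h API; the callback name is assumed for illustration).
 * The fence is signaled by the compiler thread, and si_bind_compute_state()
 * now does the wait, so only the failure flag needs checking here. */
util_queue_fence_init(&program->ready);
util_queue_add_job(&sscreen->shader_compiler_queue, program,
		   &program->ready, si_create_compute_state_async, NULL);
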
si_decompress_compute_textures(sctx);
if (info->indirect) {
r600_context_add_resource_size(ctx, info->indirect);
- /* The hw doesn't read the indirect buffer via TC L2. */
- if (r600_resource(info->indirect)->TC_L2_dirty) {
+ /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+ if (sctx->b.chip_class <= VI &&
+ r600_resource(info->indirect)->TC_L2_dirty) {
sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
r600_resource(info->indirect)->TC_L2_dirty = false;
}
}
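
/* Sketch of the producer side (the call site and "dst" are assumptions):
 * shader stores land in TC L2, so whoever writes the dispatch args marks
 * the buffer. On <= VI the CP reads indirect args without going through
 * TC L2, hence the writeback above; on GFX9 it reads through TC L2 and no
 * flush is needed. */
r600_resource(dst)->TC_L2_dirty = true;
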
if (program->ir_type == PIPE_SHADER_IR_TGSI) {
- util_queue_fence_wait(&program->ready);
+ util_queue_drop_job(&sctx->screen->shader_compiler_queue,
+ &program->ready);
util_queue_fence_destroy(&program->ready);
}
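/* util_queue_drop_job() either removes the job from the queue if it has not
 * started executing, or waits for it to finish if it has, so destroying the
 * fence immediately afterwards is safe in both cases. */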