*
*/
+#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
+#include "util/u_upload_mgr.h"
#include "radeon/r600_pipe_common.h"
#include "radeon/radeon_elf_util.h"
-#include "radeon/radeon_llvm_util.h"
#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "sid.h"
#define MAX_GLOBAL_BUFFERS 20
-#if HAVE_LLVM < 0x0305
-#define NUM_USER_SGPRS 2
-#else
-/* XXX: Even though we don't pass the scratch buffer via user sgprs any more
- * LLVM still expects that we specify 4 USER_SGPRS so it can remain compatible
- * with older mesa. */
-#define NUM_USER_SGPRS 4
-#endif
-
-static const char *scratch_rsrc_dword0_symbol =
- "SCRATCH_RSRC_DWORD0";
-
-static const char *scratch_rsrc_dword1_symbol =
- "SCRATCH_RSRC_DWORD1";
struct si_compute {
- struct si_context *ctx;
-
+ unsigned ir_type;
unsigned local_size;
unsigned private_size;
unsigned input_size;
struct si_shader shader;
- unsigned num_user_sgprs;
- struct r600_resource *input_buffer;
struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-
-#if HAVE_LLVM < 0x0306
- unsigned num_kernels;
- struct si_shader *kernels;
- LLVMContextRef llvm_ctx;
-#endif
};
static void *si_create_compute_state(
	struct pipe_context *ctx,
	const struct pipe_compute_state *cso)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_screen *sscreen = (struct si_screen *)ctx->screen;
struct si_compute *program = CALLOC_STRUCT(si_compute);
- const struct pipe_llvm_program_header *header;
- const char *code;
+ struct si_shader *shader = &program->shader;
- header = cso->prog;
- code = cso->prog + sizeof(struct pipe_llvm_program_header);
- program->ctx = sctx;
+ program->ir_type = cso->ir_type;
program->local_size = cso->req_local_mem;
program->private_size = cso->req_private_mem;
program->input_size = cso->req_input_mem;
-#if HAVE_LLVM < 0x0306
- {
- unsigned i;
- program->llvm_ctx = LLVMContextCreate();
- program->num_kernels = radeon_llvm_get_num_kernels(program->llvm_ctx,
- code, header->num_bytes);
- program->kernels = CALLOC(sizeof(struct si_shader),
- program->num_kernels);
- for (i = 0; i < program->num_kernels; i++) {
- LLVMModuleRef mod = radeon_llvm_get_kernel_module(program->llvm_ctx, i,
- code, header->num_bytes);
- si_compile_llvm(sctx->screen, &program->kernels[i], mod);
- LLVMDisposeModule(mod);
+
+ if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+ struct si_shader_selector sel;
+ bool scratch_enabled;
+
+ memset(&sel, 0, sizeof(sel));
+
+ sel.tokens = tgsi_dup_tokens(cso->prog);
+ if (!sel.tokens) {
+ FREE(program);
+ return NULL;
}
- }
-#else
- radeon_elf_read(code, header->num_bytes, &program->shader.binary, true);
- si_shader_binary_read(sctx->screen, &program->shader, &program->shader.binary);
+ tgsi_scan_shader(cso->prog, &sel.info);
+ sel.type = PIPE_SHADER_COMPUTE;
+ sel.local_size = cso->req_local_mem;
+
+ p_atomic_inc(&sscreen->b.num_shaders_created);
+
+ program->shader.selector = &sel;
+
+ if (si_shader_create(sscreen, sctx->tm, &program->shader,
+ &sctx->b.debug)) {
+ FREE(sel.tokens);
+ FREE(program);
+ return NULL;
+ }
+
+ scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
-#endif
- program->input_buffer = si_resource_create_custom(sctx->b.b.screen,
- PIPE_USAGE_IMMUTABLE, program->input_size);
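+	/* COMPUTE_PGM_RSRC1 encodes register demand in the HW's allocation
+	 * granularity: VGPRs in units of 4 and SGPRs in units of 8, each
+	 * biased by one unit (hence the "- 1"). */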
+ shader->config.rsrc1 =
+ S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B848_DX10_CLAMP(1) |
+ S_00B848_FLOAT_MODE(shader->config.float_mode);
+
+ shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
+ S_00B84C_SCRATCH_EN(scratch_enabled) |
+ S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
+ S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
+ S_00B84C_LDS_SIZE(shader->config.lds_size);
+
+ FREE(sel.tokens);
+ } else {
+ const struct pipe_llvm_program_header *header;
+ const char *code;
+ header = cso->prog;
+ code = cso->prog + sizeof(struct pipe_llvm_program_header);
+
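+		/* Native (HSA ELF) path: parse the precompiled code object.
+		 * Only the config at symbol offset 0 is read here;
+		 * si_switch_compute_shader re-reads it at the requested
+		 * kernel's offset at launch time. */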
+ radeon_elf_read(code, header->num_bytes, &program->shader.binary);
+ si_shader_binary_read_config(&program->shader.binary,
+ &program->shader.config, 0);
+ si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
+ PIPE_SHADER_COMPUTE, stderr);
+ si_shader_binary_upload(sctx->screen, &program->shader);
+ }
return program;
}
-/**
- * This function computes the value for R_00B860_COMPUTE_TMPRING_SIZE.WAVES
- * /p block_layout is the number of threads in each work group.
- * /p grid layout is the number of work groups.
- */
-static unsigned compute_num_waves_for_scratch(
- const struct radeon_info *info,
- const uint *block_layout,
- const uint *grid_layout)
+static void si_initialize_compute(struct si_context *sctx)
{
- unsigned num_sh = MAX2(info->max_sh_per_se, 1);
- unsigned num_se = MAX2(info->max_se, 1);
- unsigned num_blocks = 1;
- unsigned threads_per_block = 1;
- unsigned waves_per_block;
- unsigned waves_per_sh;
- unsigned waves;
- unsigned scratch_waves;
- unsigned i;
-
- for (i = 0; i < 3; i++) {
- threads_per_block *= block_layout[i];
- num_blocks *= grid_layout[i];
+	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
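+	/* One-time compute state: zero the dispatch start offsets and
+	 * enable all CUs on every shader engine for compute. */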
+ radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+
+ radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
+ /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
+ radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
+ radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
+
+ if (sctx->b.chip_class >= CIK) {
+ /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
+ radeon_set_sh_reg_seq(cs,
+ R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
+ radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) |
+ S_00B864_SH1_CU_EN(0xffff));
+ radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) |
+ S_00B868_SH1_CU_EN(0xffff));
}
- waves_per_block = align(threads_per_block, 64) / 64;
- waves = waves_per_block * num_blocks;
- waves_per_sh = align(waves, num_sh * num_se) / (num_sh * num_se);
- scratch_waves = waves_per_sh * num_sh * num_se;
+ /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
+ * and is now per pipe, so it should be handled in the
+ * kernel if we want to use something other than the default value,
+ * which is now 0x22f.
+ */
+ if (sctx->b.chip_class <= SI) {
+ /* XXX: This should be:
+ * (number of compute units) * 4 * (waves per simd) - 1 */
- if (waves_per_block > waves_per_sh) {
- scratch_waves = waves_per_block * num_sh * num_se;
+ radeon_set_sh_reg(cs, R_00B82C_COMPUTE_MAX_WAVE_ID,
+ 0x190 /* Default value */);
}
- return scratch_waves;
+ sctx->cs_shader_state.emitted_program = NULL;
+ sctx->cs_shader_state.initialized = true;
}
-static void apply_scratch_relocs(const struct si_screen *sscreen,
- const struct radeon_shader_binary *binary,
- struct si_shader *shader, uint64_t scratch_va) {
- unsigned i;
- char *ptr;
- uint32_t scratch_rsrc_dword0 = scratch_va & 0xffffffff;
- uint32_t scratch_rsrc_dword1 =
- S_008F04_BASE_ADDRESS_HI(scratch_va >> 32)
- | S_008F04_STRIDE(shader->scratch_bytes_per_wave / 64);
+static bool si_setup_compute_scratch_buffer(struct si_context *sctx,
+ struct si_shader *shader,
+ struct si_shader_config *config)
+{
+ uint64_t scratch_bo_size, scratch_needed;
+ scratch_bo_size = 0;
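+	/* Size the scratch buffer for the worst case: every wave that can
+	 * be in flight (sctx->scratch_waves) spilling at once. */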
+ scratch_needed = config->scratch_bytes_per_wave * sctx->scratch_waves;
+ if (sctx->compute_scratch_buffer)
+ scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
- if (!binary->reloc_count) {
- return;
+ if (scratch_bo_size < scratch_needed) {
+ r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
+
+ sctx->compute_scratch_buffer =
+ si_resource_create_custom(&sctx->screen->b.b,
+ PIPE_USAGE_DEFAULT, scratch_needed);
+
+ if (!sctx->compute_scratch_buffer)
+ return false;
}
- ptr = sscreen->b.ws->buffer_map(shader->bo->cs_buf, NULL,
- PIPE_TRANSFER_READ_WRITE);
- for (i = 0 ; i < binary->reloc_count; i++) {
- const struct radeon_shader_reloc *reloc = &binary->relocs[i];
- if (!strcmp(scratch_rsrc_dword0_symbol, reloc->name)) {
- util_memcpy_cpu_to_le32(ptr + reloc->offset,
- &scratch_rsrc_dword0, 4);
- } else if (!strcmp(scratch_rsrc_dword1_symbol, reloc->name)) {
- util_memcpy_cpu_to_le32(ptr + reloc->offset,
- &scratch_rsrc_dword1, 4);
+ if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
+ uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
+
+ si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);
+
+ if (si_shader_binary_upload(sctx->screen, shader))
+ return false;
+
+ r600_resource_reference(&shader->scratch_bo,
+ sctx->compute_scratch_buffer);
+ }
+
+ return true;
+}
+
+static bool si_switch_compute_shader(struct si_context *sctx,
+ struct si_compute *program,
+ struct si_shader *shader, unsigned offset)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct si_shader_config inline_config = {0};
+ struct si_shader_config *config;
+ uint64_t shader_va;
+
+ if (sctx->cs_shader_state.emitted_program == program &&
+ sctx->cs_shader_state.offset == offset)
+ return true;
+
+ if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+ config = &shader->config;
+ } else {
+ unsigned lds_blocks;
+
+ config = &inline_config;
+ si_shader_binary_read_config(&shader->binary, config, offset);
+
+ lds_blocks = config->lds_size;
+ /* XXX: We are over allocating LDS. For SI, the shader reports
+ * LDS in blocks of 256 bytes, so if there are 4 bytes lds
+ * allocated in the shader and 4 bytes allocated by the state
+ * tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
+ */
+ if (sctx->b.chip_class <= SI) {
+ lds_blocks += align(program->local_size, 256) >> 8;
+ } else {
+ lds_blocks += align(program->local_size, 512) >> 9;
}
+
+ assert(lds_blocks <= 0xFF);
+
+ config->rsrc2 &= C_00B84C_LDS_SIZE;
+ config->rsrc2 |= S_00B84C_LDS_SIZE(lds_blocks);
+ }
+
+ if (!si_setup_compute_scratch_buffer(sctx, shader, config))
+ return false;
+
+ if (shader->scratch_bo) {
+ COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
+ "Total Scratch: %u bytes\n", sctx->scratch_waves,
+ config->scratch_bytes_per_wave,
+ config->scratch_bytes_per_wave *
+ sctx->scratch_waves);
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ shader->scratch_bo, RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SCRATCH_BUFFER);
}
- sscreen->b.ws->buffer_unmap(shader->bo->cs_buf);
+
+ shader_va = shader->bo->gpu_address + offset;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+ RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
+
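+	/* The shader VA is programmed in 256-byte units: PGM_LO takes
+	 * bits [39:8] and PGM_HI the bits above them. */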
+ radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
+ radeon_emit(cs, shader_va >> 8);
+ radeon_emit(cs, shader_va >> 40);
+
+ radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
+ radeon_emit(cs, config->rsrc1);
+ radeon_emit(cs, config->rsrc2);
+
+ COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
+ "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);
+
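+	/* TMPRING_SIZE.WAVES caps how many waves may use scratch at once;
+	 * WAVESIZE is the per-wave scratch size in 1KB (256-dword) units. */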
+ radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
+ S_00B860_WAVES(sctx->scratch_waves)
+ | S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));
+
+ sctx->cs_shader_state.emitted_program = program;
+ sctx->cs_shader_state.offset = offset;
+ sctx->cs_shader_state.uses_scratch =
+ config->scratch_bytes_per_wave != 0;
+
+ return true;
}
-static void si_launch_grid(
- struct pipe_context *ctx,
- const uint *block_layout, const uint *grid_layout,
- uint32_t pc, const void *input)
+static void si_upload_compute_input(struct si_context *sctx,
+ const struct pipe_grid_info *info)
{
- struct si_context *sctx = (struct si_context*)ctx;
- struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_compute *program = sctx->cs_shader_state.program;
- struct si_pm4_state *pm4 = CALLOC_STRUCT(si_pm4_state);
- struct r600_resource *input_buffer = program->input_buffer;
+ struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
unsigned num_work_size_bytes = 36;
uint32_t kernel_args_offset = 0;
uint32_t *kernel_args;
+ void *kernel_args_ptr;
uint64_t kernel_args_va;
- uint64_t scratch_buffer_va = 0;
- uint64_t shader_va;
- unsigned arg_user_sgpr_count = NUM_USER_SGPRS;
unsigned i;
- struct si_shader *shader = &program->shader;
- unsigned lds_blocks;
- unsigned num_waves_for_scratch;
-#if HAVE_LLVM < 0x0306
- shader = &program->kernels[pc];
-#endif
+ /* The extra num_work_size_bytes are for work group / work item size information */
+ kernel_args_size = program->input_size + num_work_size_bytes;
+ u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
+ &kernel_args_offset,
+ (struct pipe_resource**)&input_buffer, &kernel_args_ptr);
- radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0) | PKT3_SHADER_TYPE_S(1));
- radeon_emit(cs, 0x80000000);
- radeon_emit(cs, 0x80000000);
+ kernel_args = (uint32_t*)kernel_args_ptr;
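+	/* The 36-byte header is 9 dwords: grid size (work-group count),
+	 * global size (grid * block) and block size, in that order. */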
+ for (i = 0; i < 3; i++) {
+ kernel_args[i] = info->grid[i];
+ kernel_args[i + 3] = info->grid[i] * info->block[i];
+ kernel_args[i + 6] = info->block[i];
+ }
- sctx->b.flags |= SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_INV_KCACHE |
- SI_CONTEXT_FLUSH_WITH_INV_L2 |
- SI_CONTEXT_FLAG_COMPUTE;
- si_emit_cache_flush(&sctx->b, NULL);
+ memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
+ program->input_size);
- pm4->compute_pkt = true;
-#if HAVE_LLVM >= 0x0306
- /* Read the config information */
- si_shader_binary_read_config(&program->shader.binary, shader, pc);
-#endif
+ for (i = 0; i < (kernel_args_size / 4); i++) {
+ COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
+ kernel_args[i]);
+ }
- /* Upload the kernel arguments */
+ kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
- /* The extra num_work_size_bytes are for work group / work item size information */
- kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
- kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
- sctx->b.rings.gfx.cs, PIPE_TRANSFER_WRITE);
- for (i = 0; i < 3; i++) {
- kernel_args[i] = grid_layout[i];
- kernel_args[i + 3] = grid_layout[i] * block_layout[i];
- kernel_args[i + 6] = block_layout[i];
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
+ radeon_emit(cs, kernel_args_va);
+ radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
+ S_008F04_STRIDE(0));
+
+ r600_resource_reference(&input_buffer, NULL);
+}
+
+static void si_setup_tgsi_grid(struct si_context *sctx,
+ const struct pipe_grid_info *info)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
+ 4 * SI_SGPR_GRID_SIZE;
+
+ if (info->indirect) {
+ uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+ uint64_t va = base_va + info->indirect_offset;
+ int i;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
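+		/* Copy the three grid dimensions from the indirect buffer
+		 * into the grid-size user SGPRs, one dword at a time. */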
+ for (i = 0; i < 3; ++i) {
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG));
+ radeon_emit(cs, (va + 4 * i));
+ radeon_emit(cs, (va + 4 * i) >> 32);
+ radeon_emit(cs, (grid_size_reg >> 2) + i);
+ radeon_emit(cs, 0);
+ }
+ } else {
+ radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+ radeon_emit(cs, info->grid[0]);
+ radeon_emit(cs, info->grid[1]);
+ radeon_emit(cs, info->grid[2]);
}
+}
- num_waves_for_scratch = compute_num_waves_for_scratch(
- &sctx->screen->b.info, block_layout, grid_layout);
+static void si_emit_dispatch_packets(struct si_context *sctx,
+ const struct pipe_grid_info *info)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+ unsigned waves_per_threadgroup =
+ DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
+
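+	/* If the wave count per threadgroup is a multiple of 4, the waves
+	 * can be spread evenly over the CU's four SIMDs. */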
+ radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
+ S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));
+
+ radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
+ radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
+ radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
+ radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
+
+ if (info->indirect) {
+ uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
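+		/* SET_BASE (index 1) sets the base address that the
+		 * following DISPATCH_INDIRECT offset is relative to. */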
+ radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, 1);
+ radeon_emit(cs, base_va);
+ radeon_emit(cs, base_va >> 32);
+
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, info->indirect_offset);
+ radeon_emit(cs, 1);
+ } else {
+ radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
+ PKT3_SHADER_TYPE_S(1));
+ radeon_emit(cs, info->grid[0]);
+ radeon_emit(cs, info->grid[1]);
+ radeon_emit(cs, info->grid[2]);
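+		/* DISPATCH_INITIATOR: COMPUTE_SHADER_EN */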
+ radeon_emit(cs, 1);
+ }
+}
+
+static void si_launch_grid(
+ struct pipe_context *ctx, const struct pipe_grid_info *info)
+{
+ struct si_context *sctx = (struct si_context*)ctx;
+ struct si_compute *program = sctx->cs_shader_state.program;
+ int i;
+ /* HW bug workaround when CS threadgroups > 256 threads and async
+ * compute isn't used, i.e. only one compute job can run at a time.
+ * If async compute is possible, the threadgroup size must be limited
+ * to 256 threads on all queues to avoid the bug.
+ * Only SI and certain CIK chips are affected.
+ */
+ bool cs_regalloc_hang =
+ (sctx->b.chip_class == SI ||
+ sctx->b.family == CHIP_BONAIRE ||
+ sctx->b.family == CHIP_KABINI) &&
+ info->block[0] * info->block[1] * info->block[2] > 256;
- memcpy(kernel_args + (num_work_size_bytes / 4), input, program->input_size);
+ if (cs_regalloc_hang)
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
- if (shader->scratch_bytes_per_wave > 0) {
- unsigned scratch_bytes = shader->scratch_bytes_per_wave *
- num_waves_for_scratch;
+ si_decompress_compute_textures(sctx);
- COMPUTE_DBG(sctx->screen, "Waves: %u; Scratch per wave: %u bytes; "
- "Total Scratch: %u bytes\n", num_waves_for_scratch,
- shader->scratch_bytes_per_wave, scratch_bytes);
- if (!shader->scratch_bo) {
- shader->scratch_bo = (struct r600_resource*)
- si_resource_create_custom(sctx->b.b.screen,
- PIPE_USAGE_DEFAULT, scratch_bytes);
- }
- scratch_buffer_va = shader->scratch_bo->gpu_address;
- si_pm4_add_bo(pm4, shader->scratch_bo,
- RADEON_USAGE_READWRITE,
- RADEON_PRIO_SHADER_RESOURCE_RW);
+ /* Add buffer sizes for memory checking in need_cs_space. */
+ r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
+ /* TODO: add the scratch buffer */
- /* Patch the shader with the scratch buffer address. */
- apply_scratch_relocs(sctx->screen,
- &program->shader.binary, shader, scratch_buffer_va);
+ if (info->indirect) {
+ r600_context_add_resource_size(ctx, info->indirect);
+ /* The hw doesn't read the indirect buffer via TC L2. */
+ if (r600_resource(info->indirect)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2;
+ r600_resource(info->indirect)->TC_L2_dirty = false;
+ }
}
- for (i = 0; i < (kernel_args_size / 4); i++) {
- COMPUTE_DBG(sctx->screen, "input %u : %u\n", i,
- kernel_args[i]);
- }
+ si_need_cs_space(sctx);
- sctx->b.ws->buffer_unmap(input_buffer->cs_buf);
+ if (!sctx->cs_shader_state.initialized)
+ si_initialize_compute(sctx);
- kernel_args_va = input_buffer->gpu_address;
- kernel_args_va += kernel_args_offset;
+ if (sctx->b.flags)
+ si_emit_cache_flush(sctx);
- si_pm4_add_bo(pm4, input_buffer, RADEON_USAGE_READ,
- RADEON_PRIO_SHADER_DATA);
+ if (!si_switch_compute_shader(sctx, program, &program->shader, info->pc))
+ return;
- si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0, kernel_args_va);
- si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 4, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) | S_008F04_STRIDE(0));
- si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 8, scratch_buffer_va);
- si_pm4_set_reg(pm4, R_00B900_COMPUTE_USER_DATA_0 + 12,
- S_008F04_BASE_ADDRESS_HI(scratch_buffer_va >> 32)
- | S_008F04_STRIDE(shader->scratch_bytes_per_wave / 64));
+ si_upload_compute_shader_descriptors(sctx);
+ si_emit_compute_shader_userdata(sctx);
- si_pm4_set_reg(pm4, R_00B810_COMPUTE_START_X, 0);
- si_pm4_set_reg(pm4, R_00B814_COMPUTE_START_Y, 0);
- si_pm4_set_reg(pm4, R_00B818_COMPUTE_START_Z, 0);
+ if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
+ sctx->atoms.s.render_cond->emit(&sctx->b,
+ sctx->atoms.s.render_cond);
+ si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+ }
- si_pm4_set_reg(pm4, R_00B81C_COMPUTE_NUM_THREAD_X,
- S_00B81C_NUM_THREAD_FULL(block_layout[0]));
- si_pm4_set_reg(pm4, R_00B820_COMPUTE_NUM_THREAD_Y,
- S_00B820_NUM_THREAD_FULL(block_layout[1]));
- si_pm4_set_reg(pm4, R_00B824_COMPUTE_NUM_THREAD_Z,
- S_00B824_NUM_THREAD_FULL(block_layout[2]));
+ if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
+ si_upload_compute_input(sctx, info);
/* Global buffers */
	for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
		struct r600_resource *buffer =
			(struct r600_resource*)program->global_buffers[i];
		if (!buffer) {
continue;
}
- si_pm4_add_bo(pm4, buffer, RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_COMPUTE_GLOBAL);
}
- /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
- * and is now per pipe, so it should be handled in the
- * kernel if we want to use something other than the default value,
- * which is now 0x22f.
- */
- if (sctx->b.chip_class <= SI) {
- /* XXX: This should be:
- * (number of compute units) * 4 * (waves per simd) - 1 */
+ if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ si_setup_tgsi_grid(sctx, info);
- si_pm4_set_reg(pm4, R_00B82C_COMPUTE_MAX_WAVE_ID,
- 0x190 /* Default value */);
- }
+ si_ce_pre_draw_synchronization(sctx);
- shader_va = shader->bo->gpu_address;
-
-#if HAVE_LLVM >= 0x0306
- shader_va += pc;
-#endif
- si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);
- si_pm4_set_reg(pm4, R_00B830_COMPUTE_PGM_LO, (shader_va >> 8) & 0xffffffff);
- si_pm4_set_reg(pm4, R_00B834_COMPUTE_PGM_HI, shader_va >> 40);
-
- si_pm4_set_reg(pm4, R_00B848_COMPUTE_PGM_RSRC1,
- /* We always use at least 3 VGPRS, these come from
- * TIDIG_COMP_CNT.
- * XXX: The compiler should account for this.
- */
- S_00B848_VGPRS((MAX2(3, shader->num_vgprs) - 1) / 4)
- /* We always use at least 4 + arg_user_sgpr_count. The 4 extra
- * sgprs are from TGID_X_EN, TGID_Y_EN, TGID_Z_EN, TG_SIZE_EN
- * XXX: The compiler should account for this.
- */
- | S_00B848_SGPRS(((MAX2(4 + arg_user_sgpr_count,
- shader->num_sgprs)) - 1) / 8))
- ;
-
- lds_blocks = shader->lds_size;
- /* XXX: We are over allocating LDS. For SI, the shader reports LDS in
- * blocks of 256 bytes, so if there are 4 bytes lds allocated in
- * the shader and 4 bytes allocated by the state tracker, then
- * we will set LDS_SIZE to 512 bytes rather than 256.
- */
- if (sctx->b.chip_class <= SI) {
- lds_blocks += align(program->local_size, 256) >> 8;
- } else {
- lds_blocks += align(program->local_size, 512) >> 9;
- }
+ si_emit_dispatch_packets(sctx, info);
- assert(lds_blocks <= 0xFF);
-
- si_pm4_set_reg(pm4, R_00B84C_COMPUTE_PGM_RSRC2,
- S_00B84C_SCRATCH_EN(shader->scratch_bytes_per_wave > 0)
- | S_00B84C_USER_SGPR(arg_user_sgpr_count)
- | S_00B84C_TGID_X_EN(1)
- | S_00B84C_TGID_Y_EN(1)
- | S_00B84C_TGID_Z_EN(1)
- | S_00B84C_TG_SIZE_EN(1)
- | S_00B84C_TIDIG_COMP_CNT(2)
- | S_00B84C_LDS_SIZE(lds_blocks)
- | S_00B84C_EXCP_EN(0))
- ;
- si_pm4_set_reg(pm4, R_00B854_COMPUTE_RESOURCE_LIMITS, 0);
-
- si_pm4_set_reg(pm4, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0,
- S_00B858_SH0_CU_EN(0xffff /* Default value */)
- | S_00B858_SH1_CU_EN(0xffff /* Default value */))
- ;
-
- si_pm4_set_reg(pm4, R_00B85C_COMPUTE_STATIC_THREAD_MGMT_SE1,
- S_00B85C_SH0_CU_EN(0xffff /* Default value */)
- | S_00B85C_SH1_CU_EN(0xffff /* Default value */))
- ;
-
- num_waves_for_scratch =
- MIN2(num_waves_for_scratch,
- 32 * sctx->screen->b.info.max_compute_units);
- si_pm4_set_reg(pm4, R_00B860_COMPUTE_TMPRING_SIZE,
- /* The maximum value for WAVES is 32 * num CU.
- * If you program this value incorrectly, the GPU will hang if
- * COMPUTE_PGM_RSRC2.SCRATCH_EN is enabled.
- */
- S_00B860_WAVES(num_waves_for_scratch)
- | S_00B860_WAVESIZE(shader->scratch_bytes_per_wave >> 10))
- ;
-
- si_pm4_cmd_begin(pm4, PKT3_DISPATCH_DIRECT);
- si_pm4_cmd_add(pm4, grid_layout[0]); /* Thread groups DIM_X */
- si_pm4_cmd_add(pm4, grid_layout[1]); /* Thread groups DIM_Y */
- si_pm4_cmd_add(pm4, grid_layout[2]); /* Thread gropus DIM_Z */
- si_pm4_cmd_add(pm4, 1); /* DISPATCH_INITIATOR */
- si_pm4_cmd_end(pm4, false);
-
- si_pm4_emit(sctx, pm4);
-
-#if 0
- fprintf(stderr, "cdw: %i\n", sctx->cs->cdw);
- for (i = 0; i < sctx->cs->cdw; i++) {
- fprintf(stderr, "%4i : 0x%08X\n", i, sctx->cs->buf[i]);
- }
-#endif
+ si_ce_post_draw_synchronization(sctx);
- si_pm4_free_state(sctx, pm4, ~0);
+ sctx->compute_is_busy = true;
+ sctx->b.num_compute_calls++;
+ if (sctx->cs_shader_state.uses_scratch)
+ sctx->b.num_spill_compute_calls++;
- sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- SI_CONTEXT_INV_ICACHE |
- SI_CONTEXT_INV_KCACHE |
- SI_CONTEXT_FLAG_COMPUTE;
- si_emit_cache_flush(&sctx->b, NULL);
+ if (cs_regalloc_hang)
+ sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
static void si_delete_compute_state(struct pipe_context *ctx, void* state){
struct si_compute *program = (struct si_compute *)state;
+ struct si_context *sctx = (struct si_context*)ctx;
if (!state) {
return;
}
-#if HAVE_LLVM < 0x0306
- if (program->kernels) {
- for (int i = 0; i < program->num_kernels; i++){
- if (program->kernels[i].bo){
- si_shader_destroy(ctx, &program->kernels[i]);
- }
- }
- FREE(program->kernels);
- }
-
- if (program->llvm_ctx){
- LLVMContextDispose(program->llvm_ctx);
- }
-#else
- si_shader_destroy(ctx, &program->shader);
-#endif
+ if (program == sctx->cs_shader_state.program)
+ sctx->cs_shader_state.program = NULL;
- pipe_resource_reference(
- (struct pipe_resource **)&program->input_buffer, NULL);
+ if (program == sctx->cs_shader_state.emitted_program)
+ sctx->cs_shader_state.emitted_program = NULL;
- radeon_shader_binary_free_members(&program->shader.binary, true);
+ si_shader_destroy(&program->shader);
FREE(program);
}