#include "tgsi/tgsi_parse.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
-#include "radeon/r600_pipe_common.h"
-#include "radeon/radeon_elf_util.h"
+#include "amd_kernel_code_t.h"
#include "radeon/r600_cs.h"
#include "si_pipe.h"
-#include "si_shader.h"
+#include "si_compute.h"
#include "sid.h"
-#define MAX_GLOBAL_BUFFERS 20
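+/* Mirrors the leading fields of the HSA AQL kernel dispatch packet
+ * (hsa_kernel_dispatch_packet_t). Code object v2 kernels read these
+ * values through their DISPATCH_PTR user SGPR. */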
+struct dispatch_packet {
+ uint16_t header;
+ uint16_t setup;
+ uint16_t workgroup_size_x;
+ uint16_t workgroup_size_y;
+ uint16_t workgroup_size_z;
+ uint16_t reserved0;
+ uint32_t grid_size_x;
+ uint32_t grid_size_y;
+ uint32_t grid_size_z;
+ uint32_t private_segment_size;
+ uint32_t group_segment_size;
+ uint64_t kernel_object;
+ uint64_t kernarg_address;
+ uint64_t reserved2;
+};
-struct si_compute {
- unsigned ir_type;
- unsigned local_size;
- unsigned private_size;
- unsigned input_size;
- struct si_shader shader;
+static const amd_kernel_code_t *si_compute_get_code_object(
+ const struct si_compute *program,
+ uint64_t symbol_offset)
+{
+ if (!program->use_code_object_v2) {
+ return NULL;
+ }
+ return (const amd_kernel_code_t*)
+ (program->shader.binary.code + symbol_offset);
+}
- struct pipe_resource *global_buffers[MAX_GLOBAL_BUFFERS];
-};
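+/* Translate the amd_kernel_code_t header emitted for code object v2
+ * into the si_shader_config register values the driver programs. */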
+static void code_object_to_config(const amd_kernel_code_t *code_object,
+ struct si_shader_config *out_config)
+{
+ uint32_t rsrc1 = code_object->compute_pgm_resource_registers;
+ uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32;
+ out_config->num_sgprs = code_object->wavefront_sgpr_count;
+ out_config->num_vgprs = code_object->workitem_vgpr_count;
+ out_config->float_mode = G_00B028_FLOAT_MODE(rsrc1);
+ out_config->rsrc1 = rsrc1;
+ out_config->lds_size = MAX2(out_config->lds_size, G_00B84C_LDS_SIZE(rsrc2));
+ out_config->rsrc2 = rsrc2;
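+ /* The code object stores the private segment size per work-item;
+ * scratch is allocated per wave (64 lanes), and TMPRING WAVESIZE
+ * is programmed in 1024-byte units. */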
+ out_config->scratch_bytes_per_wave =
+ align(code_object->workitem_private_segment_byte_size * 64, 1024);
+}
+
+/* Asynchronous compute shader compilation. */
+static void si_create_compute_state_async(void *job, int thread_index)
+{
+ struct si_compute *program = (struct si_compute *)job;
+ struct si_shader *shader = &program->shader;
+ struct si_shader_selector sel;
+ LLVMTargetMachineRef tm;
+ struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
+
+ if (thread_index >= 0) {
+ assert(thread_index < ARRAY_SIZE(program->screen->tm));
+ tm = program->screen->tm[thread_index];
+ if (!debug->async)
+ debug = NULL;
+ } else {
+ tm = program->compiler_ctx_state.tm;
+ }
+
+ memset(&sel, 0, sizeof(sel));
+
+ sel.screen = program->screen;
+ tgsi_scan_shader(program->tokens, &sel.info);
+ sel.tokens = program->tokens;
+ sel.type = PIPE_SHADER_COMPUTE;
+ sel.local_size = program->local_size;
+ si_get_active_slot_masks(&sel.info,
+ &program->active_const_and_shader_buffers,
+ &program->active_samplers_and_images);
+
+ program->shader.selector = &sel;
+ program->shader.is_monolithic = true;
+ program->uses_grid_size = sel.info.uses_grid_size;
+ program->uses_block_size = sel.info.uses_block_size;
+ program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
+ program->uses_bindless_images = sel.info.uses_bindless_images;
+
+ if (si_shader_create(program->screen, tm, &program->shader, debug)) {
+ program->shader.compilation_failed = true;
+ } else {
+ bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
+ unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS +
+ (sel.info.uses_grid_size ? 3 : 0) +
+ (sel.info.uses_block_size ? 3 : 0);
+
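+ /* VGPRs are allocated in granules of 4 and SGPRs in granules
+ * of 8; these fields encode the granule count minus one. */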
+ shader->config.rsrc1 =
+ S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
+ S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
+ S_00B848_DX10_CLAMP(1) |
+ S_00B848_FLOAT_MODE(shader->config.float_mode);
+
+ shader->config.rsrc2 =
+ S_00B84C_USER_SGPR(user_sgprs) |
+ S_00B84C_SCRATCH_EN(scratch_enabled) |
+ S_00B84C_TGID_X_EN(sel.info.uses_block_id[0]) |
+ S_00B84C_TGID_Y_EN(sel.info.uses_block_id[1]) |
+ S_00B84C_TGID_Z_EN(sel.info.uses_block_id[2]) |
+ S_00B84C_TIDIG_COMP_CNT(sel.info.uses_thread_id[2] ? 2 :
+ sel.info.uses_thread_id[1] ? 1 : 0) |
+ S_00B84C_LDS_SIZE(shader->config.lds_size);
+
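+ /* A fixed block width of 0 means the block size is only known
+ * at dispatch time, from pipe_grid_info::block. */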
+ program->variable_group_size =
+ sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
+ }
+
+ FREE(program->tokens);
+ program->shader.selector = NULL;
+}
static void *si_create_compute_state(
struct pipe_context *ctx,
struct si_context *sctx = (struct si_context *)ctx;
struct si_screen *sscreen = (struct si_screen *)ctx->screen;
struct si_compute *program = CALLOC_STRUCT(si_compute);
- struct si_shader *shader = &program->shader;
-
+ pipe_reference_init(&program->reference, 1);
+ program->screen = (struct si_screen *)ctx->screen;
program->ir_type = cso->ir_type;
program->local_size = cso->req_local_mem;
program->private_size = cso->req_private_mem;
program->input_size = cso->req_input_mem;
-
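+ /* LLVM >= 4.0 emits native (OpenCL) kernels as HSA code object v2
+ * binaries, which use a different metadata header and user SGPR ABI. */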
+ program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
+ cso->ir_type == PIPE_SHADER_IR_NATIVE;
if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
- struct si_shader_selector sel;
- bool scratch_enabled;
-
- memset(&sel, 0, sizeof(sel));
-
- sel.tokens = tgsi_dup_tokens(cso->prog);
- if (!sel.tokens) {
+ program->tokens = tgsi_dup_tokens(cso->prog);
+ if (!program->tokens) {
FREE(program);
return NULL;
}
- tgsi_scan_shader(cso->prog, &sel.info);
- sel.type = PIPE_SHADER_COMPUTE;
- sel.local_size = cso->req_local_mem;
-
+ program->compiler_ctx_state.tm = sctx->tm;
+ program->compiler_ctx_state.debug = sctx->b.debug;
+ program->compiler_ctx_state.is_debug_context = sctx->is_debug;
p_atomic_inc(&sscreen->b.num_shaders_created);
-
- program->shader.selector = &sel;
-
- if (si_shader_create(sscreen, sctx->tm, &program->shader,
- &sctx->b.debug)) {
- FREE(sel.tokens);
- FREE(program);
- return NULL;
- }
-
- scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
-
- shader->config.rsrc1 =
- S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) |
- S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) |
- S_00B848_DX10_CLAMP(1) |
- S_00B848_FLOAT_MODE(shader->config.float_mode);
-
- shader->config.rsrc2 = S_00B84C_USER_SGPR(SI_CS_NUM_USER_SGPR) |
- S_00B84C_SCRATCH_EN(scratch_enabled) |
- S_00B84C_TGID_X_EN(1) | S_00B84C_TGID_Y_EN(1) |
- S_00B84C_TGID_Z_EN(1) | S_00B84C_TIDIG_COMP_CNT(2) |
- S_00B84C_LDS_SIZE(shader->config.lds_size);
-
- FREE(sel.tokens);
+ util_queue_fence_init(&program->ready);
+
+ if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
+ sctx->is_debug ||
+ si_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
+ si_create_compute_state_async(program, -1);
+ else
+ util_queue_add_job(&sscreen->shader_compiler_queue,
+ program, &program->ready,
+ si_create_compute_state_async, NULL);
} else {
const struct pipe_llvm_program_header *header;
const char *code;
header = cso->prog;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
- radeon_elf_read(code, header->num_bytes, &program->shader.binary);
- si_shader_binary_read_config(&program->shader.binary,
- &program->shader.config, 0);
+ ac_elf_read(code, header->num_bytes, &program->shader.binary);
+ if (program->use_code_object_v2) {
+ const amd_kernel_code_t *code_object =
+ si_compute_get_code_object(program, 0);
+ code_object_to_config(code_object, &program->shader.config);
+ } else {
+ si_shader_binary_read_config(&program->shader.binary,
+ &program->shader.config, 0);
+ }
si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
- PIPE_SHADER_COMPUTE, stderr);
- si_shader_binary_upload(sctx->screen, &program->shader);
+ PIPE_SHADER_COMPUTE, stderr, true);
+ if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
+ fprintf(stderr, "LLVM failed to upload shader\n");
+ FREE(program);
+ return NULL;
+ }
}
return program;
static void si_bind_compute_state(struct pipe_context *ctx, void *state)
{
struct si_context *sctx = (struct si_context*)ctx;
- sctx->cs_shader_state.program = (struct si_compute*)state;
+ struct si_compute *program = (struct si_compute*)state;
+
+ sctx->cs_shader_state.program = program;
+ if (!program)
+ return;
+
+ /* Wait because we need active slot usage masks. */
+ if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ util_queue_fence_wait(&program->ready);
+
+ si_set_active_descriptors(sctx,
+ SI_DESCS_FIRST_COMPUTE +
+ SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
+ program->active_const_and_shader_buffers);
+ si_set_active_descriptors(sctx,
+ SI_DESCS_FIRST_COMPUTE +
+ SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
+ program->active_samplers_and_images);
}
static void si_set_global_binding(
struct si_context *sctx = (struct si_context*)ctx;
struct si_compute *program = sctx->cs_shader_state.program;
+ assert(first + n <= MAX_GLOBAL_BUFFERS);
+
if (!resources) {
- for (i = first; i < first + n; i++) {
- pipe_resource_reference(&program->global_buffers[i], NULL);
+ for (i = 0; i < n; i++) {
+ pipe_resource_reference(&program->global_buffers[first + i], NULL);
}
return;
}
- for (i = first; i < first + n; i++) {
+ for (i = 0; i < n; i++) {
uint64_t va;
uint32_t offset;
- pipe_resource_reference(&program->global_buffers[i], resources[i]);
+ pipe_resource_reference(&program->global_buffers[first + i], resources[i]);
va = r600_resource(resources[i])->gpu_address;
offset = util_le32_to_cpu(*handles[i]);
va += offset;
static void si_initialize_compute(struct si_context *sctx)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
-
- radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
- radeon_emit(cs, 0);
- radeon_emit(cs, 0);
- radeon_emit(cs, 0);
+ uint64_t bc_va;
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
/* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */
0x190 /* Default value */);
}
+ /* Set the pointer to border colors. */
+ bc_va = sctx->border_color_buffer->gpu_address;
+
+ if (sctx->b.chip_class >= CIK) {
+ radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
+ radeon_emit(cs, bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */
+ radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
+ } else {
+ if (sctx->screen->b.info.drm_major == 3 ||
+ (sctx->screen->b.info.drm_major == 2 &&
+ sctx->screen->b.info.drm_minor >= 48)) {
+ radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
+ bc_va >> 8);
+ }
+ }
+
sctx->cs_shader_state.emitted_program = NULL;
sctx->cs_shader_state.initialized = true;
}
if (scratch_bo_size < scratch_needed) {
r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
- sctx->compute_scratch_buffer =
- si_resource_create_custom(&sctx->screen->b.b,
- PIPE_USAGE_DEFAULT, scratch_needed);
+ sctx->compute_scratch_buffer = (struct r600_resource*)
+ si_aligned_buffer_create(&sctx->screen->b.b,
+ R600_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed, 256);
if (!sctx->compute_scratch_buffer)
return false;
if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
- si_shader_apply_scratch_relocs(sctx, shader, config, scratch_va);
+ si_shader_apply_scratch_relocs(shader, scratch_va);
if (si_shader_binary_upload(sctx->screen, shader))
return false;
static bool si_switch_compute_shader(struct si_context *sctx,
struct si_compute *program,
- struct si_shader *shader, unsigned offset)
+ struct si_shader *shader,
+ const amd_kernel_code_t *code_object,
+ unsigned offset)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_shader_config inline_config = {0};
unsigned lds_blocks;
config = &inline_config;
- si_shader_binary_read_config(&shader->binary, config, offset);
+ if (code_object) {
+ code_object_to_config(code_object, config);
+ } else {
+ si_shader_binary_read_config(&shader->binary, config, offset);
+ }
lds_blocks = config->lds_size;
/* XXX: We are over allocating LDS. For SI, the shader reports
lds_blocks += align(program->local_size, 512) >> 9;
}
+ /* TODO: use si_multiwave_lds_size_workaround */
assert(lds_blocks <= 0xFF);
config->rsrc2 &= C_00B84C_LDS_SIZE;
RADEON_PRIO_SCRATCH_BUFFER);
}
+ /* Prefetch the compute shader to TC L2.
+ *
+ * We should also prefetch graphics shaders if a compute dispatch was
+ * the last command, and the compute shader if a draw call was the last
+ * command. However, that would add more complexity and we're likely
+ * to get a shader state change in that case anyway.
+ */
+ if (sctx->b.chip_class >= CIK) {
+ cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
+ 0, program->shader.bo->b.b.width0);
+ }
+
shader_va = shader->bo->gpu_address + offset;
+ if (program->use_code_object_v2) {
+ /* Shader code is placed after the amd_kernel_code_t
+ * struct. */
+ shader_va += sizeof(amd_kernel_code_t);
+ }
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_emit(cs, config->rsrc1);
radeon_emit(cs, config->rsrc2);
+ COMPUTE_DBG(sctx->screen, "COMPUTE_PGM_RSRC1: 0x%08x "
+ "COMPUTE_PGM_RSRC2: 0x%08x\n", config->rsrc1, config->rsrc2);
+
radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
S_00B860_WAVES(sctx->scratch_waves)
| S_00B860_WAVESIZE(config->scratch_bytes_per_wave >> 10));
return true;
}
-static void si_upload_compute_input(struct si_context *sctx,
- const struct pipe_grid_info *info)
+static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
+ const amd_kernel_code_t *code_object,
+ unsigned user_sgpr)
+{
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
+
+ unsigned max_private_element_size = AMD_HSA_BITS_GET(
+ code_object->code_properties,
+ AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE);
+
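+ /* Build a 128-bit buffer descriptor (V#) for scratch: base address
+ * in dwords 0-1, per-lane swizzled addressing via INDEX_STRIDE and
+ * ADD_TID_ENABLE in dword 3. */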
+ uint32_t scratch_dword0 = scratch_va & 0xffffffff;
+ uint32_t scratch_dword1 =
+ S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
+ S_008F04_SWIZZLE_ENABLE(1);
+
+ /* Disable address clamping */
+ uint32_t scratch_dword2 = 0xffffffff;
+ uint32_t scratch_dword3 =
+ S_008F0C_INDEX_STRIDE(3) |
+ S_008F0C_ADD_TID_ENABLE(1);
+
+ if (sctx->b.chip_class >= GFX9) {
+ assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
+ } else {
+ scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
+
+ if (sctx->b.chip_class < VI) {
+ /* BUF_DATA_FORMAT is ignored, but it cannot be
+ * BUF_DATA_FORMAT_INVALID. */
+ scratch_dword3 |=
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
+ }
+ }
+
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+ (user_sgpr * 4), 4);
+ radeon_emit(cs, scratch_dword0);
+ radeon_emit(cs, scratch_dword1);
+ radeon_emit(cs, scratch_dword2);
+ radeon_emit(cs, scratch_dword3);
+}
+
+static void si_setup_user_sgprs_co_v2(struct si_context *sctx,
+ const amd_kernel_code_t *code_object,
+ const struct pipe_grid_info *info,
+ uint64_t kernel_args_va)
+{
+ struct si_compute *program = sctx->cs_shader_state.program;
+ struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+
+ static const enum amd_code_property_mask_t workgroup_count_masks[] = {
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z
+ };
+
+ unsigned i, user_sgpr = 0;
+ if (AMD_HSA_BITS_GET(code_object->code_properties,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER)) {
+ if (code_object->workitem_private_segment_byte_size > 0) {
+ setup_scratch_rsrc_user_sgprs(sctx, code_object,
+ user_sgpr);
+ }
+ user_sgpr += 4;
+ }
+
+ if (AMD_HSA_BITS_GET(code_object->code_properties,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) {
+ struct dispatch_packet dispatch;
+ unsigned dispatch_offset;
+ struct r600_resource *dispatch_buf = NULL;
+ uint64_t dispatch_va;
+
+ /* Build and upload the dispatch packet that the kernel reads
+ * through its DISPATCH_PTR user SGPR. */
+ memset(&dispatch, 0, sizeof(dispatch));
+
+ dispatch.workgroup_size_x = info->block[0];
+ dispatch.workgroup_size_y = info->block[1];
+ dispatch.workgroup_size_z = info->block[2];
+
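+ /* AQL grid sizes are in work-items, not workgroups. */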
+ dispatch.grid_size_x = info->grid[0] * info->block[0];
+ dispatch.grid_size_y = info->grid[1] * info->block[1];
+ dispatch.grid_size_z = info->grid[2] * info->block[2];
+
+ dispatch.private_segment_size = program->private_size;
+ dispatch.group_segment_size = program->local_size;
+
+ dispatch.kernarg_address = kernel_args_va;
+
+ u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+ 256, &dispatch, &dispatch_offset,
+ (struct pipe_resource**)&dispatch_buf);
+
+ if (!dispatch_buf) {
+ fprintf(stderr, "Error: Failed to allocate dispatch "
+ "packet.\n");
+ return; /* don't pass a NULL buffer to radeon_add_to_buffer_list */
+ }
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+ RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
+
+ dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
+
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+ (user_sgpr * 4), 2);
+ radeon_emit(cs, dispatch_va);
+ radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) |
+ S_008F04_STRIDE(0));
+
+ r600_resource_reference(&dispatch_buf, NULL);
+ user_sgpr += 2;
+ }
+
+ if (AMD_HSA_BITS_GET(code_object->code_properties,
+ AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR)) {
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
+ (user_sgpr * 4), 2);
+ radeon_emit(cs, kernel_args_va);
+ radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
+ S_008F04_STRIDE(0));
+ user_sgpr += 2;
+ }
+
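+ /* Only 16 user SGPRs (COMPUTE_USER_DATA_0..15) are available;
+ * emit the optional workgroup counts while SGPRs remain. */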
+ for (i = 0; i < 3 && user_sgpr < 16; i++) {
+ if (code_object->code_properties & workgroup_count_masks[i]) {
+ radeon_set_sh_reg_seq(cs,
+ R_00B900_COMPUTE_USER_DATA_0 +
+ (user_sgpr * 4), 1);
+ radeon_emit(cs, info->grid[i]);
+ user_sgpr += 1;
+ }
+ }
+}
+
+static bool si_upload_compute_input(struct si_context *sctx,
+ const amd_kernel_code_t *code_object,
+ const struct pipe_grid_info *info)
{
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
- unsigned num_work_size_bytes = 36;
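+ /* Code object v2 kernels read grid/block sizes from the dispatch
+ * packet, so the 36-byte (9 dword) work-size prefix is only needed
+ * for older binaries. */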
+ unsigned num_work_size_bytes = program->use_code_object_v2 ? 0 : 36;
uint32_t kernel_args_offset = 0;
uint32_t *kernel_args;
void *kernel_args_ptr;
/* The extra num_work_size_bytes are for work group / work item size information */
kernel_args_size = program->input_size + num_work_size_bytes;
- u_upload_alloc(sctx->b.uploader, 0, kernel_args_size, 256,
+ u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
+ sctx->screen->b.info.tcc_cache_line_size,
&kernel_args_offset,
(struct pipe_resource**)&input_buffer, &kernel_args_ptr);
+ if (unlikely(!kernel_args_ptr))
+ return false;
+
kernel_args = (uint32_t*)kernel_args_ptr;
- for (i = 0; i < 3; i++) {
- kernel_args[i] = info->grid[i];
- kernel_args[i + 3] = info->grid[i] * info->block[i];
- kernel_args[i + 6] = info->block[i];
+ kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
+
+ if (!code_object) {
+ for (i = 0; i < 3; i++) {
+ kernel_args[i] = info->grid[i];
+ kernel_args[i + 3] = info->grid[i] * info->block[i];
+ kernel_args[i + 6] = info->block[i];
+ }
}
memcpy(kernel_args + (num_work_size_bytes / 4), info->input,
kernel_args[i]);
}
- kernel_args_va = input_buffer->gpu_address + kernel_args_offset;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
- radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
- radeon_emit(cs, kernel_args_va);
- radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
- S_008F04_STRIDE(0));
+ if (code_object) {
+ si_setup_user_sgprs_co_v2(sctx, code_object, info, kernel_args_va);
+ } else {
+ radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
+ radeon_emit(cs, kernel_args_va);
+ radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
+ S_008F04_STRIDE(0));
+ }
r600_resource_reference(&input_buffer, NULL);
+
+ return true;
}
static void si_setup_tgsi_grid(struct si_context *sctx,
const struct pipe_grid_info *info)
{
+ struct si_compute *program = sctx->cs_shader_state.program;
struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
- 4 * SI_SGPR_GRID_SIZE;
+ 4 * SI_NUM_RESOURCE_SGPRS;
+ unsigned block_size_reg = grid_size_reg +
+ /* 12 bytes = 3 dwords. */
+ 12 * program->uses_grid_size;
if (info->indirect) {
- uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- uint64_t va = base_va + info->indirect_offset;
- int i;
-
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource *)info->indirect,
- RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
-
- for (i = 0; i < 3; ++i) {
- radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
- radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
- COPY_DATA_DST_SEL(COPY_DATA_REG));
- radeon_emit(cs, (va + 4 * i));
- radeon_emit(cs, (va + 4 * i) >> 32);
- radeon_emit(cs, (grid_size_reg >> 2) + i);
- radeon_emit(cs, 0);
+ if (program->uses_grid_size) {
+ uint64_t base_va = r600_resource(info->indirect)->gpu_address;
+ uint64_t va = base_va + info->indirect_offset;
+ int i;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ (struct r600_resource *)info->indirect,
+ RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
+
+ for (i = 0; i < 3; ++i) {
+ radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
+ radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
+ COPY_DATA_DST_SEL(COPY_DATA_REG));
+ radeon_emit(cs, (va + 4 * i));
+ radeon_emit(cs, (va + 4 * i) >> 32);
+ radeon_emit(cs, (grid_size_reg >> 2) + i);
+ radeon_emit(cs, 0);
+ }
}
} else {
-
- radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
- radeon_emit(cs, info->grid[0]);
- radeon_emit(cs, info->grid[1]);
- radeon_emit(cs, info->grid[2]);
+ if (program->uses_grid_size) {
+ radeon_set_sh_reg_seq(cs, grid_size_reg, 3);
+ radeon_emit(cs, info->grid[0]);
+ radeon_emit(cs, info->grid[1]);
+ radeon_emit(cs, info->grid[2]);
+ }
+ if (program->variable_group_size && program->uses_block_size) {
+ radeon_set_sh_reg_seq(cs, block_size_reg, 3);
+ radeon_emit(cs, info->block[0]);
+ radeon_emit(cs, info->block[1]);
+ radeon_emit(cs, info->block[2]);
+ }
}
}
radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]));
radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]));
+ unsigned dispatch_initiator =
+ S_00B800_COMPUTE_SHADER_EN(1) |
+ S_00B800_FORCE_START_AT_000(1) |
+ /* If the KMD allows it (there is a KMD hw register for it),
+ * allow launching waves out-of-order. (same as Vulkan) */
+ S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, render_cond_bit) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, info->indirect_offset);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
} else {
radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, render_cond_bit) |
PKT3_SHADER_TYPE_S(1));
radeon_emit(cs, info->grid[0]);
radeon_emit(cs, info->grid[1]);
radeon_emit(cs, info->grid[2]);
- radeon_emit(cs, 1);
+ radeon_emit(cs, dispatch_initiator);
}
}
{
struct si_context *sctx = (struct si_context*)ctx;
struct si_compute *program = sctx->cs_shader_state.program;
+ const amd_kernel_code_t *code_object =
+ si_compute_get_code_object(program, info->pc);
int i;
/* HW bug workaround when CS threadgroups > 256 threads and async
* compute isn't used, i.e. only one compute job can run at a time.
sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH;
+ if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+ program->shader.compilation_failed)
+ return;
+
+ if (sctx->b.last_num_draw_calls != sctx->b.num_draw_calls) {
+ si_update_fb_dirtiness_after_rendering(sctx);
+ sctx->b.last_num_draw_calls = sctx->b.num_draw_calls;
+ }
+
si_decompress_compute_textures(sctx);
/* Add buffer sizes for memory checking in need_cs_space. */
r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
- if (info->indirect)
- r600_context_add_resource_size(ctx, info->indirect);
/* TODO: add the scratch buffer */
+ if (info->indirect) {
+ r600_context_add_resource_size(ctx, info->indirect);
+
+ /* Indirect buffers use TC L2 on GFX9, but not older hw. */
+ if (sctx->b.chip_class <= VI &&
+ r600_resource(info->indirect)->TC_L2_dirty) {
+ sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ r600_resource(info->indirect)->TC_L2_dirty = false;
+ }
+ }
+
si_need_cs_space(sctx);
+ if (sctx->b.log)
+ si_log_compute_state(sctx, sctx->b.log);
+
if (!sctx->cs_shader_state.initialized)
si_initialize_compute(sctx);
if (sctx->b.flags)
- si_emit_cache_flush(sctx, NULL);
+ si_emit_cache_flush(sctx);
- if (!si_switch_compute_shader(sctx, program, &program->shader, info->pc))
+ if (!si_switch_compute_shader(sctx, program, &program->shader,
+ code_object, info->pc))
return;
si_upload_compute_shader_descriptors(sctx);
- si_emit_compute_shader_userdata(sctx);
+ si_emit_compute_shader_pointers(sctx);
if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
sctx->atoms.s.render_cond->emit(&sctx->b,
si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
}
- if (program->input_size || program->ir_type == PIPE_SHADER_IR_NATIVE)
- si_upload_compute_input(sctx, info);
+ if ((program->input_size ||
+ program->ir_type == PIPE_SHADER_IR_NATIVE) &&
+ unlikely(!si_upload_compute_input(sctx, code_object, info))) {
+ return;
+ }
/* Global buffers */
for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
if (program->ir_type == PIPE_SHADER_IR_TGSI)
si_setup_tgsi_grid(sctx, info);
- si_ce_pre_draw_synchronization(sctx);
-
si_emit_dispatch_packets(sctx, info);
- si_ce_post_draw_synchronization(sctx);
+ if (unlikely(sctx->current_saved_cs))
+ si_trace_emit(sctx);
+ sctx->compute_is_busy = true;
sctx->b.num_compute_calls++;
if (sctx->cs_shader_state.uses_scratch)
sctx->b.num_spill_compute_calls++;
sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
+void si_destroy_compute(struct si_compute *program)
+{
+ if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+ util_queue_drop_job(&program->screen->shader_compiler_queue,
+ &program->ready);
+ util_queue_fence_destroy(&program->ready);
+ }
+
+ si_shader_destroy(&program->shader);
+ FREE(program);
+}
static void si_delete_compute_state(struct pipe_context *ctx, void* state){
struct si_compute *program = (struct si_compute *)state;
struct si_context *sctx = (struct si_context*)ctx;
- if (!state) {
+ if (!state)
return;
- }
if (program == sctx->cs_shader_state.program)
sctx->cs_shader_state.program = NULL;
if (program == sctx->cs_shader_state.emitted_program)
sctx->cs_shader_state.emitted_program = NULL;
- si_shader_destroy(&program->shader);
- FREE(program);
+ si_compute_reference(&program, NULL);
}
static void si_set_compute_resources(struct pipe_context * ctx_,