X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fradeonsi%2Fsi_compute.c;h=0989181aba46fce0132c69292e78aceedd38a656;hb=cb82d534a0c3ea65c0f24586fcb877d912eec130;hp=f3dce65e3dd58c287b83a2934d8ae724b793c86d;hpb=2208b760f338fbd9176f4375e23f0ba20a05ce96;p=mesa.git diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c index f3dce65e3dd..0989181aba4 100644 --- a/src/gallium/drivers/radeonsi/si_compute.c +++ b/src/gallium/drivers/radeonsi/si_compute.c @@ -1,5 +1,6 @@ /* * Copyright 2013 Advanced Micro Devices, Inc. + * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -27,15 +28,14 @@ #include "util/u_memory.h" #include "util/u_upload_mgr.h" +#include "ac_rtld.h" #include "amd_kernel_code_t.h" -#include "radeon/r600_cs.h" -#include "si_pipe.h" +#include "si_build_pm4.h" #include "si_compute.h" -#include "sid.h" -#define COMPUTE_DBG(rscreen, fmt, args...) \ +#define COMPUTE_DBG(sscreen, fmt, args...) \ do { \ - if ((rscreen->b.debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \ + if ((sscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \ } while (0); struct dispatch_packet { @@ -62,12 +62,34 @@ static const amd_kernel_code_t *si_compute_get_code_object( if (!program->use_code_object_v2) { return NULL; } - return (const amd_kernel_code_t*) - (program->shader.binary.code + symbol_offset); + + struct ac_rtld_binary rtld; + if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){ + .info = &program->screen->info, + .shader_type = MESA_SHADER_COMPUTE, + .num_parts = 1, + .elf_ptrs = &program->shader.binary.elf_buffer, + .elf_sizes = &program->shader.binary.elf_size })) + return NULL; + + const amd_kernel_code_t *result = NULL; + const char *text; + size_t size; + if (!ac_rtld_get_section_by_name(&rtld, ".text", &text, &size)) + goto out; + + if (symbol_offset + sizeof(amd_kernel_code_t) > size) + goto out; + + result = (const amd_kernel_code_t*)(text + symbol_offset); + +out: + ac_rtld_close(&rtld); + return result; } static void code_object_to_config(const amd_kernel_code_t *code_object, - struct si_shader_config *out_config) { + struct ac_shader_config *out_config) { uint32_t rsrc1 = code_object->compute_pgm_resource_registers; uint32_t rsrc2 = code_object->compute_pgm_resource_registers >> 32; @@ -87,21 +109,37 @@ static void si_create_compute_state_async(void *job, int thread_index) struct si_compute *program = (struct si_compute *)job; struct si_shader *shader = &program->shader; struct si_shader_selector sel; - LLVMTargetMachineRef tm; + struct ac_llvm_compiler *compiler; struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug; + struct si_screen *sscreen = program->screen; assert(!debug->debug_message || debug->async); assert(thread_index >= 0); - assert(thread_index < ARRAY_SIZE(program->screen->tm)); - tm = program->screen->tm[thread_index]; + assert(thread_index < ARRAY_SIZE(sscreen->compiler)); + compiler = &sscreen->compiler[thread_index]; memset(&sel, 0, sizeof(sel)); - sel.screen = program->screen; - tgsi_scan_shader(program->tokens, &sel.info); - sel.tokens = program->tokens; + sel.screen = sscreen; + + if (program->ir_type == PIPE_SHADER_IR_TGSI) { + tgsi_scan_shader(program->ir.tgsi, &sel.info); + sel.tokens = program->ir.tgsi; + } else { + assert(program->ir_type == PIPE_SHADER_IR_NIR); + sel.nir = program->ir.nir; + + si_nir_opts(sel.nir); + 
si_nir_scan_shader(sel.nir, &sel.info); + si_lower_nir(&sel); + } + + /* Store the declared LDS size into tgsi_shader_info for the shader + * cache to include it. + */ + sel.info.properties[TGSI_PROPERTY_CS_LOCAL_SIZE] = program->local_size; + sel.type = PIPE_SHADER_COMPUTE; - sel.local_size = program->local_size; si_get_active_slot_masks(&sel.info, &program->active_const_and_shader_buffers, &program->active_samplers_and_images); @@ -109,24 +147,58 @@ static void si_create_compute_state_async(void *job, int thread_index) program->shader.selector = &sel; program->shader.is_monolithic = true; program->uses_grid_size = sel.info.uses_grid_size; - program->uses_block_size = sel.info.uses_block_size; program->uses_bindless_samplers = sel.info.uses_bindless_samplers; program->uses_bindless_images = sel.info.uses_bindless_images; + program->reads_variable_block_size = + sel.info.uses_block_size && + sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0; + program->num_cs_user_data_dwords = + sel.info.properties[TGSI_PROPERTY_CS_USER_DATA_DWORDS]; + + void *ir_binary = si_get_ir_binary(&sel); + + /* Try to load the shader from the shader cache. */ + mtx_lock(&sscreen->shader_cache_mutex); + + if (ir_binary && + si_shader_cache_load_shader(sscreen, ir_binary, shader)) { + mtx_unlock(&sscreen->shader_cache_mutex); - if (si_shader_create(program->screen, tm, &program->shader, debug)) { - program->shader.compilation_failed = true; + si_shader_dump_stats_for_shader_db(sscreen, shader, debug); + si_shader_dump(sscreen, shader, debug, stderr, true); + + if (!si_shader_binary_upload(sscreen, shader, 0)) + program->shader.compilation_failed = true; } else { + mtx_unlock(&sscreen->shader_cache_mutex); + + if (!si_shader_create(sscreen, compiler, &program->shader, debug)) { + program->shader.compilation_failed = true; + + if (program->ir_type == PIPE_SHADER_IR_TGSI) + FREE(program->ir.tgsi); + program->shader.selector = NULL; + return; + } + bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0; unsigned user_sgprs = SI_NUM_RESOURCE_SGPRS + (sel.info.uses_grid_size ? 3 : 0) + - (sel.info.uses_block_size ? 3 : 0); + (program->reads_variable_block_size ? 3 : 0) + + program->num_cs_user_data_dwords; shader->config.rsrc1 = S_00B848_VGPRS((shader->config.num_vgprs - 1) / 4) | - S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8) | S_00B848_DX10_CLAMP(1) | + S_00B848_MEM_ORDERED(sscreen->info.chip_class >= GFX10) | + S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) | S_00B848_FLOAT_MODE(shader->config.float_mode); + if (program->screen->info.chip_class < GFX10) { + shader->config.rsrc1 |= + S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8); + } + shader->config.rsrc2 = S_00B84C_USER_SGPR(user_sgprs) | S_00B84C_SCRATCH_EN(scratch_enabled) | @@ -137,11 +209,17 @@ static void si_create_compute_state_async(void *job, int thread_index) sel.info.uses_thread_id[1] ? 
1 : 0) | S_00B84C_LDS_SIZE(shader->config.lds_size); - program->variable_group_size = - sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0; + if (ir_binary) { + mtx_lock(&sscreen->shader_cache_mutex); + if (!si_shader_cache_insert_shader(sscreen, ir_binary, shader, true)) + FREE(ir_binary); + mtx_unlock(&sscreen->shader_cache_mutex); + } } - FREE(program->tokens); + if (program->ir_type == PIPE_SHADER_IR_TGSI) + FREE(program->ir.tgsi); + program->shader.selector = NULL; } @@ -159,60 +237,50 @@ static void *si_create_compute_state( program->local_size = cso->req_local_mem; program->private_size = cso->req_private_mem; program->input_size = cso->req_input_mem; - program->use_code_object_v2 = HAVE_LLVM >= 0x0400 && - cso->ir_type == PIPE_SHADER_IR_NATIVE; - - if (cso->ir_type == PIPE_SHADER_IR_TGSI) { - program->tokens = tgsi_dup_tokens(cso->prog); - if (!program->tokens) { - FREE(program); - return NULL; + program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE; + + if (cso->ir_type != PIPE_SHADER_IR_NATIVE) { + if (cso->ir_type == PIPE_SHADER_IR_TGSI) { + program->ir.tgsi = tgsi_dup_tokens(cso->prog); + if (!program->ir.tgsi) { + FREE(program); + return NULL; + } + } else { + assert(cso->ir_type == PIPE_SHADER_IR_NIR); + program->ir.nir = (struct nir_shader *) cso->prog; } program->compiler_ctx_state.debug = sctx->debug; program->compiler_ctx_state.is_debug_context = sctx->is_debug; - p_atomic_inc(&sscreen->b.num_shaders_created); - util_queue_fence_init(&program->ready); - - struct util_async_debug_callback async_debug; - bool wait = - (sctx->debug.debug_message && !sctx->debug.async) || - sctx->is_debug || - si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE); - - if (wait) { - u_async_debug_init(&async_debug); - program->compiler_ctx_state.debug = async_debug.base; - } + p_atomic_inc(&sscreen->num_shaders_created); - util_queue_add_job(&sscreen->shader_compiler_queue, - program, &program->ready, - si_create_compute_state_async, NULL); - - if (wait) { - util_queue_fence_wait(&program->ready); - u_async_debug_drain(&async_debug, &sctx->debug); - u_async_debug_cleanup(&async_debug); - } + si_schedule_initial_compile(sctx, PIPE_SHADER_COMPUTE, + &program->ready, + &program->compiler_ctx_state, + program, si_create_compute_state_async); } else { const struct pipe_llvm_program_header *header; const char *code; header = cso->prog; code = cso->prog + sizeof(struct pipe_llvm_program_header); - ac_elf_read(code, header->num_bytes, &program->shader.binary); - if (program->use_code_object_v2) { - const amd_kernel_code_t *code_object = - si_compute_get_code_object(program, 0); - code_object_to_config(code_object, &program->shader.config); - } else { - si_shader_binary_read_config(&program->shader.binary, - &program->shader.config, 0); + program->shader.binary.elf_size = header->num_bytes; + program->shader.binary.elf_buffer = malloc(header->num_bytes); + if (!program->shader.binary.elf_buffer) { + FREE(program); + return NULL; } - si_shader_dump(sctx->screen, &program->shader, &sctx->debug, - PIPE_SHADER_COMPUTE, stderr, true); - if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) { + memcpy((void *)program->shader.binary.elf_buffer, code, header->num_bytes); + + const amd_kernel_code_t *code_object = + si_compute_get_code_object(program, 0); + code_object_to_config(code_object, &program->shader.config); + + si_shader_dump(sctx->screen, &program->shader, &sctx->debug, stderr, true); + if (!si_shader_binary_upload(sctx->screen, &program->shader, 0)) { fprintf(stderr, 
"LLVM failed to upload shader\n"); + free((void *)program->shader.binary.elf_buffer); FREE(program); return NULL; } @@ -231,7 +299,7 @@ static void si_bind_compute_state(struct pipe_context *ctx, void *state) return; /* Wait because we need active slot usage masks. */ - if (program->ir_type == PIPE_SHADER_IR_TGSI) + if (program->ir_type != PIPE_SHADER_IR_NATIVE) util_queue_fence_wait(&program->ready); si_set_active_descriptors(sctx, @@ -266,7 +334,7 @@ static void si_set_global_binding( uint64_t va; uint32_t offset; pipe_resource_reference(&program->global_buffers[first + i], resources[i]); - va = r600_resource(resources[i])->gpu_address; + va = si_resource(resources[i])->gpu_address; offset = util_le32_to_cpu(*handles[i]); va += offset; va = util_cpu_to_le64(va); @@ -274,32 +342,35 @@ static void si_set_global_binding( } } -static void si_initialize_compute(struct si_context *sctx) +void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs) { - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; uint64_t bc_va; radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2); - /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1 */ + /* R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0 / SE1, + * renamed COMPUTE_DESTINATION_EN_SEn on gfx10. */ + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff)); radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff)); - radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff)); - if (sctx->b.chip_class >= CIK) { + if (sctx->chip_class >= GFX7) { /* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */ radeon_set_sh_reg_seq(cs, R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2); - radeon_emit(cs, S_00B864_SH0_CU_EN(0xffff) | - S_00B864_SH1_CU_EN(0xffff)); - radeon_emit(cs, S_00B868_SH0_CU_EN(0xffff) | - S_00B868_SH1_CU_EN(0xffff)); + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | + S_00B858_SH1_CU_EN(0xffff)); + radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | + S_00B858_SH1_CU_EN(0xffff)); } + if (sctx->chip_class >= GFX10) + radeon_set_sh_reg(cs, R_00B8A0_COMPUTE_PGM_RSRC3, 0); + /* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID * and is now per pipe, so it should be handled in the * kernel if we want to use something other than the default value, * which is now 0x22f. */ - if (sctx->b.chip_class <= SI) { + if (sctx->chip_class <= GFX6) { /* XXX: This should be: * (number of compute units) * 4 * (waves per simd) - 1 */ @@ -310,26 +381,21 @@ static void si_initialize_compute(struct si_context *sctx) /* Set the pointer to border colors. 
*/ bc_va = sctx->border_color_buffer->gpu_address; - if (sctx->b.chip_class >= CIK) { + if (sctx->chip_class >= GFX7) { radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2); radeon_emit(cs, bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */ - radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */ + radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */ } else { - if (sctx->screen->b.info.drm_major == 3 || - (sctx->screen->b.info.drm_major == 2 && - sctx->screen->b.info.drm_minor >= 48)) { + if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) { radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR, bc_va >> 8); } } - - sctx->cs_shader_state.emitted_program = NULL; - sctx->cs_shader_state.initialized = true; } static bool si_setup_compute_scratch_buffer(struct si_context *sctx, struct si_shader *shader, - struct si_shader_config *config) + struct ac_shader_config *config) { uint64_t scratch_bo_size, scratch_needed; scratch_bo_size = 0; @@ -338,13 +404,13 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx, scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0; if (scratch_bo_size < scratch_needed) { - r600_resource_reference(&sctx->compute_scratch_buffer, NULL); + si_resource_reference(&sctx->compute_scratch_buffer, NULL); - sctx->compute_scratch_buffer = (struct r600_resource*) - si_aligned_buffer_create(&sctx->screen->b.b, - R600_RESOURCE_FLAG_UNMAPPABLE, - PIPE_USAGE_DEFAULT, - scratch_needed, 256); + sctx->compute_scratch_buffer = + si_aligned_buffer_create(&sctx->screen->b, + SI_RESOURCE_FLAG_UNMAPPABLE, + PIPE_USAGE_DEFAULT, + scratch_needed, 256); if (!sctx->compute_scratch_buffer) return false; @@ -353,12 +419,10 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx, if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) { uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address; - si_shader_apply_scratch_relocs(shader, scratch_va); - - if (si_shader_binary_upload(sctx->screen, shader)) + if (!si_shader_binary_upload(sctx->screen, shader, scratch_va)) return false; - r600_resource_reference(&shader->scratch_bo, + si_resource_reference(&shader->scratch_bo, sctx->compute_scratch_buffer); } @@ -371,34 +435,30 @@ static bool si_switch_compute_shader(struct si_context *sctx, const amd_kernel_code_t *code_object, unsigned offset) { - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; - struct si_shader_config inline_config = {0}; - struct si_shader_config *config; + struct radeon_cmdbuf *cs = sctx->gfx_cs; + struct ac_shader_config inline_config = {0}; + struct ac_shader_config *config; uint64_t shader_va; if (sctx->cs_shader_state.emitted_program == program && sctx->cs_shader_state.offset == offset) return true; - if (program->ir_type == PIPE_SHADER_IR_TGSI) { + if (program->ir_type != PIPE_SHADER_IR_NATIVE) { config = &shader->config; } else { unsigned lds_blocks; config = &inline_config; - if (code_object) { - code_object_to_config(code_object, config); - } else { - si_shader_binary_read_config(&shader->binary, config, offset); - } + code_object_to_config(code_object, config); lds_blocks = config->lds_size; - /* XXX: We are over allocating LDS. For SI, the shader reports + /* XXX: We are over allocating LDS. For GFX6, the shader reports * LDS in blocks of 256 bytes, so if there are 4 bytes lds * allocated in the shader and 4 bytes allocated by the state * tracker, then we will set LDS_SIZE to 512 bytes rather than 256. 
*/ - if (sctx->b.chip_class <= SI) { + if (sctx->chip_class <= GFX6) { lds_blocks += align(program->local_size, 256) >> 8; } else { lds_blocks += align(program->local_size, 512) >> 9; @@ -421,7 +481,7 @@ static bool si_switch_compute_shader(struct si_context *sctx, config->scratch_bytes_per_wave * sctx->scratch_waves); - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->scratch_bo, RADEON_USAGE_READWRITE, RADEON_PRIO_SCRATCH_BUFFER); } @@ -433,7 +493,7 @@ static bool si_switch_compute_shader(struct si_context *sctx, * command. However, that would add more complexity and we're likely * to get a shader state change in that case anyway. */ - if (sctx->b.chip_class >= CIK) { + if (sctx->chip_class >= GFX7) { cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b, 0, program->shader.bo->b.b.width0); } @@ -445,12 +505,12 @@ static bool si_switch_compute_shader(struct si_context *sctx, shader_va += sizeof(amd_kernel_code_t); } - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY); radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2); radeon_emit(cs, shader_va >> 8); - radeon_emit(cs, shader_va >> 40); + radeon_emit(cs, S_00B834_DATA(shader_va >> 40)); radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2); radeon_emit(cs, config->rsrc1); @@ -475,7 +535,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx, const amd_kernel_code_t *code_object, unsigned user_sgpr) { - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; + struct radeon_cmdbuf *cs = sctx->gfx_cs; uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address; unsigned max_private_element_size = AMD_HSA_BITS_GET( @@ -493,12 +553,12 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx, S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1); - if (sctx->b.chip_class >= GFX9) { + if (sctx->chip_class >= GFX9) { assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */ } else { scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size); - if (sctx->b.chip_class < VI) { + if (sctx->chip_class < GFX8) { /* BUF_DATA_FORMAT is ignored, but it cannot be * BUF_DATA_FORMAT_INVALID. 
*/ scratch_dword3 |= @@ -520,7 +580,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, uint64_t kernel_args_va) { struct si_compute *program = sctx->cs_shader_state.program; - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; + struct radeon_cmdbuf *cs = sctx->gfx_cs; static const enum amd_code_property_mask_t workgroup_count_masks [] = { AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X, @@ -542,26 +602,26 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR)) { struct dispatch_packet dispatch; unsigned dispatch_offset; - struct r600_resource *dispatch_buf = NULL; + struct si_resource *dispatch_buf = NULL; uint64_t dispatch_va; /* Upload dispatch ptr */ memset(&dispatch, 0, sizeof(dispatch)); - dispatch.workgroup_size_x = info->block[0]; - dispatch.workgroup_size_y = info->block[1]; - dispatch.workgroup_size_z = info->block[2]; + dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]); + dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]); + dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]); - dispatch.grid_size_x = info->grid[0] * info->block[0]; - dispatch.grid_size_y = info->grid[1] * info->block[1]; - dispatch.grid_size_z = info->grid[2] * info->block[2]; + dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]); + dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]); + dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]); - dispatch.private_segment_size = program->private_size; - dispatch.group_segment_size = program->local_size; + dispatch.private_segment_size = util_cpu_to_le32(program->private_size); + dispatch.group_segment_size = util_cpu_to_le32(program->local_size); - dispatch.kernarg_address = kernel_args_va; + dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va); - u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch), + u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch), 256, &dispatch, &dispatch_offset, (struct pipe_resource**)&dispatch_buf); @@ -569,7 +629,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, fprintf(stderr, "Error: Failed to allocate dispatch " "packet."); } - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf, RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER); dispatch_va = dispatch_buf->gpu_address + dispatch_offset; @@ -580,7 +640,7 @@ static void si_setup_user_sgprs_co_v2(struct si_context *sctx, radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(dispatch_va >> 32) | S_008F04_STRIDE(0)); - r600_resource_reference(&dispatch_buf, NULL); + si_resource_reference(&dispatch_buf, NULL); user_sgpr += 2; } @@ -609,9 +669,9 @@ static bool si_upload_compute_input(struct si_context *sctx, const amd_kernel_code_t *code_object, const struct pipe_grid_info *info) { - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; + struct radeon_cmdbuf *cs = sctx->gfx_cs; struct si_compute *program = sctx->cs_shader_state.program; - struct r600_resource *input_buffer = NULL; + struct si_resource *input_buffer = NULL; unsigned kernel_args_size; unsigned num_work_size_bytes = program->use_code_object_v2 ? 
0 : 36; uint32_t kernel_args_offset = 0; @@ -623,8 +683,8 @@ static bool si_upload_compute_input(struct si_context *sctx, /* The extra num_work_size_bytes are for work group / work item size information */ kernel_args_size = program->input_size + num_work_size_bytes; - u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size, - sctx->screen->b.info.tcc_cache_line_size, + u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size, + sctx->screen->info.tcc_cache_line_size, &kernel_args_offset, (struct pipe_resource**)&input_buffer, &kernel_args_ptr); @@ -636,9 +696,9 @@ static bool si_upload_compute_input(struct si_context *sctx, if (!code_object) { for (i = 0; i < 3; i++) { - kernel_args[i] = info->grid[i]; - kernel_args[i + 3] = info->grid[i] * info->block[i]; - kernel_args[i + 6] = info->block[i]; + kernel_args[i] = util_cpu_to_le32(info->grid[i]); + kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]); + kernel_args[i + 6] = util_cpu_to_le32(info->block[i]); } } @@ -652,7 +712,7 @@ static bool si_upload_compute_input(struct si_context *sctx, } - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer, RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER); if (code_object) { @@ -664,40 +724,31 @@ static bool si_upload_compute_input(struct si_context *sctx, S_008F04_STRIDE(0)); } - r600_resource_reference(&input_buffer, NULL); + si_resource_reference(&input_buffer, NULL); return true; } -static void si_setup_tgsi_grid(struct si_context *sctx, +static void si_setup_tgsi_user_data(struct si_context *sctx, const struct pipe_grid_info *info) { struct si_compute *program = sctx->cs_shader_state.program; - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; + struct radeon_cmdbuf *cs = sctx->gfx_cs; unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 + 4 * SI_NUM_RESOURCE_SGPRS; unsigned block_size_reg = grid_size_reg + /* 12 bytes = 3 dwords. 
*/ 12 * program->uses_grid_size; + unsigned cs_user_data_reg = block_size_reg + + 12 * program->reads_variable_block_size; if (info->indirect) { if (program->uses_grid_size) { - uint64_t base_va = r600_resource(info->indirect)->gpu_address; - uint64_t va = base_va + info->indirect_offset; - int i; - - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, - (struct r600_resource *)info->indirect, - RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT); - - for (i = 0; i < 3; ++i) { - radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0)); - radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | - COPY_DATA_DST_SEL(COPY_DATA_REG)); - radeon_emit(cs, (va + 4 * i)); - radeon_emit(cs, (va + 4 * i) >> 32); - radeon_emit(cs, (grid_size_reg >> 2) + i); - radeon_emit(cs, 0); + for (unsigned i = 0; i < 3; ++i) { + si_cp_copy_data(sctx, sctx->gfx_cs, + COPY_DATA_REG, NULL, (grid_size_reg >> 2) + i, + COPY_DATA_SRC_MEM, si_resource(info->indirect), + info->indirect_offset + 4 * i); } } } else { @@ -707,29 +758,31 @@ static void si_setup_tgsi_grid(struct si_context *sctx, radeon_emit(cs, info->grid[1]); radeon_emit(cs, info->grid[2]); } - if (program->variable_group_size && program->uses_block_size) { + if (program->reads_variable_block_size) { radeon_set_sh_reg_seq(cs, block_size_reg, 3); radeon_emit(cs, info->block[0]); radeon_emit(cs, info->block[1]); radeon_emit(cs, info->block[2]); } } + + if (program->num_cs_user_data_dwords) { + radeon_set_sh_reg_seq(cs, cs_user_data_reg, program->num_cs_user_data_dwords); + radeon_emit_array(cs, sctx->cs_user_data, program->num_cs_user_data_dwords); + } } -static void si_emit_dispatch_packets(struct si_context *sctx, - const struct pipe_grid_info *info) +unsigned si_get_compute_resource_limits(struct si_screen *sscreen, + unsigned waves_per_threadgroup, + unsigned max_waves_per_sh, + unsigned threadgroups_per_cu) { - struct si_screen *sscreen = sctx->screen; - struct radeon_winsys_cs *cs = sctx->b.gfx.cs; - bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off; - unsigned waves_per_threadgroup = - DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64); unsigned compute_resource_limits = S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0); - if (sctx->b.chip_class >= CIK) { - unsigned num_cu_per_se = sscreen->b.info.num_good_compute_units / - sscreen->b.info.max_se; + if (sscreen->info.chip_class >= GFX7) { + unsigned num_cu_per_se = sscreen->info.num_good_compute_units / + sscreen->info.max_se; /* Force even distribution on all SIMDs in CU if the workgroup * size is 64. 
This has shown some good improvements if # of CUs @@ -737,28 +790,79 @@ static void si_emit_dispatch_packets(struct si_context *sctx, */ if (num_cu_per_se % 4 && waves_per_threadgroup == 1) compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1); + + assert(threadgroups_per_cu >= 1 && threadgroups_per_cu <= 8); + compute_resource_limits |= S_00B854_WAVES_PER_SH(max_waves_per_sh) | + S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1); + } else { + /* GFX6 */ + if (max_waves_per_sh) { + unsigned limit_div16 = DIV_ROUND_UP(max_waves_per_sh, 16); + compute_resource_limits |= S_00B854_WAVES_PER_SH_SI(limit_div16); + } } + return compute_resource_limits; +} - radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, - compute_resource_limits); +static void si_emit_dispatch_packets(struct si_context *sctx, + const struct pipe_grid_info *info) +{ + struct si_screen *sscreen = sctx->screen; + struct radeon_cmdbuf *cs = sctx->gfx_cs; + bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off; + unsigned threads_per_threadgroup = + info->block[0] * info->block[1] * info->block[2]; + unsigned waves_per_threadgroup = + DIV_ROUND_UP(threads_per_threadgroup, 64); + unsigned threadgroups_per_cu = 1; - radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3); - radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0])); - radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1])); - radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2])); + if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1) + threadgroups_per_cu = 2; + + radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS, + si_get_compute_resource_limits(sscreen, waves_per_threadgroup, + sctx->cs_max_waves_per_sh, + threadgroups_per_cu)); unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_FORCE_START_AT_000(1) | /* If the KMD allows it (there is a KMD hw register for it), * allow launching waves out-of-order. (same as Vulkan) */ - S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK); + S_00B800_ORDER_MODE(sctx->chip_class >= GFX7); + + const uint *last_block = info->last_block; + bool partial_block_en = last_block[0] || last_block[1] || last_block[2]; + + radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3); + + if (partial_block_en) { + unsigned partial[3]; + + /* If no partial_block, these should be an entire block size, not 0. */ + partial[0] = last_block[0] ? last_block[0] : info->block[0]; + partial[1] = last_block[1] ? last_block[1] : info->block[1]; + partial[2] = last_block[2] ? 
last_block[2] : info->block[2]; + + radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]) | + S_00B81C_NUM_THREAD_PARTIAL(partial[0])); + radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1]) | + S_00B820_NUM_THREAD_PARTIAL(partial[1])); + radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2]) | + S_00B824_NUM_THREAD_PARTIAL(partial[2])); + + dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1); + } else { + radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0])); + radeon_emit(cs, S_00B820_NUM_THREAD_FULL(info->block[1])); + radeon_emit(cs, S_00B824_NUM_THREAD_FULL(info->block[2])); + } if (info->indirect) { - uint64_t base_va = r600_resource(info->indirect)->gpu_address; + uint64_t base_va = si_resource(info->indirect)->gpu_address; - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, - (struct r600_resource *)info->indirect, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, + si_resource(info->indirect), RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT); radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) | @@ -794,51 +898,60 @@ static void si_launch_grid( * compute isn't used, i.e. only one compute job can run at a time. * If async compute is possible, the threadgroup size must be limited * to 256 threads on all queues to avoid the bug. - * Only SI and certain CIK chips are affected. + * Only GFX6 and certain GFX7 chips are affected. */ bool cs_regalloc_hang = - (sctx->b.chip_class == SI || - sctx->b.family == CHIP_BONAIRE || - sctx->b.family == CHIP_KABINI) && + (sctx->chip_class == GFX6 || + sctx->family == CHIP_BONAIRE || + sctx->family == CHIP_KABINI) && info->block[0] * info->block[1] * info->block[2] > 256; if (cs_regalloc_hang) - sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | + sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH; - if (program->ir_type == PIPE_SHADER_IR_TGSI && + if (program->ir_type != PIPE_SHADER_IR_NATIVE && program->shader.compilation_failed) return; - if (sctx->b.last_num_draw_calls != sctx->b.num_draw_calls) { - si_update_fb_dirtiness_after_rendering(sctx); - sctx->b.last_num_draw_calls = sctx->b.num_draw_calls; - } + if (sctx->has_graphics) { + if (sctx->last_num_draw_calls != sctx->num_draw_calls) { + si_update_fb_dirtiness_after_rendering(sctx); + sctx->last_num_draw_calls = sctx->num_draw_calls; + } - si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE); + si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE); + } /* Add buffer sizes for memory checking in need_cs_space. */ - si_context_add_resource_size(ctx, &program->shader.bo->b.b); + si_context_add_resource_size(sctx, &program->shader.bo->b.b); /* TODO: add the scratch buffer */ if (info->indirect) { - si_context_add_resource_size(ctx, info->indirect); + si_context_add_resource_size(sctx, info->indirect); /* Indirect buffers use TC L2 on GFX9, but not older hw. 
*/ - if (sctx->b.chip_class <= VI && - r600_resource(info->indirect)->TC_L2_dirty) { - sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2; - r600_resource(info->indirect)->TC_L2_dirty = false; + if (sctx->chip_class <= GFX8 && + si_resource(info->indirect)->TC_L2_dirty) { + sctx->flags |= SI_CONTEXT_WB_L2; + si_resource(info->indirect)->TC_L2_dirty = false; } } - si_need_cs_space(sctx); + si_need_gfx_cs_space(sctx); + + if (sctx->bo_list_add_all_compute_resources) + si_compute_resources_add_all_to_bo_list(sctx); - if (!sctx->cs_shader_state.initialized) - si_initialize_compute(sctx); + if (!sctx->cs_shader_state.initialized) { + si_emit_initial_compute_regs(sctx, sctx->gfx_cs); - if (sctx->b.flags) - si_emit_cache_flush(sctx); + sctx->cs_shader_state.emitted_program = NULL; + sctx->cs_shader_state.initialized = true; + } + + if (sctx->flags) + sctx->emit_cache_flush(sctx); if (!si_switch_compute_shader(sctx, program, &program->shader, code_object, info->pc)) @@ -847,10 +960,10 @@ static void si_launch_grid( si_upload_compute_shader_descriptors(sctx); si_emit_compute_shader_pointers(sctx); - if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) { - sctx->atoms.s.render_cond->emit(&sctx->b, - sctx->atoms.s.render_cond); - si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false); + if (sctx->has_graphics && + si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) { + sctx->atoms.s.render_cond.emit(sctx); + si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false); } if ((program->input_size || @@ -861,38 +974,38 @@ static void si_launch_grid( /* Global buffers */ for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) { - struct r600_resource *buffer = - (struct r600_resource*)program->global_buffers[i]; + struct si_resource *buffer = + si_resource(program->global_buffers[i]); if (!buffer) { continue; } - radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer, + radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer, RADEON_USAGE_READWRITE, RADEON_PRIO_COMPUTE_GLOBAL); } - if (program->ir_type == PIPE_SHADER_IR_TGSI) - si_setup_tgsi_grid(sctx, info); + if (program->ir_type != PIPE_SHADER_IR_NATIVE) + si_setup_tgsi_user_data(sctx, info); si_emit_dispatch_packets(sctx, info); if (unlikely(sctx->current_saved_cs)) { si_trace_emit(sctx); - si_log_compute_state(sctx, sctx->b.log); + si_log_compute_state(sctx, sctx->log); } sctx->compute_is_busy = true; - sctx->b.num_compute_calls++; + sctx->num_compute_calls++; if (sctx->cs_shader_state.uses_scratch) - sctx->b.num_spill_compute_calls++; + sctx->num_spill_compute_calls++; if (cs_regalloc_hang) - sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH; + sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH; } void si_destroy_compute(struct si_compute *program) { - if (program->ir_type == PIPE_SHADER_IR_TGSI) { + if (program->ir_type != PIPE_SHADER_IR_NATIVE) { util_queue_drop_job(&program->screen->shader_compiler_queue, &program->ready); util_queue_fence_destroy(&program->ready); @@ -924,11 +1037,10 @@ static void si_set_compute_resources(struct pipe_context * ctx_, void si_init_compute_functions(struct si_context *sctx) { - sctx->b.b.create_compute_state = si_create_compute_state; - sctx->b.b.delete_compute_state = si_delete_compute_state; - sctx->b.b.bind_compute_state = si_bind_compute_state; -/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */ - sctx->b.b.set_compute_resources = si_set_compute_resources; - sctx->b.b.set_global_binding = si_set_global_binding; - sctx->b.b.launch_grid = si_launch_grid; + sctx->b.create_compute_state = 
si_create_compute_state; + sctx->b.delete_compute_state = si_delete_compute_state; + sctx->b.bind_compute_state = si_bind_compute_state; + sctx->b.set_compute_resources = si_set_compute_resources; + sctx->b.set_global_binding = si_set_global_binding; + sctx->b.launch_grid = si_launch_grid; }
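
The dispatch-packet and shader-switch hunks above pack several related size calculations. The following is a minimal standalone C sketch of that arithmetic; the helper names below are illustrative only, not part of si_compute.c (the real logic lives in si_emit_dispatch_packets() and si_switch_compute_shader() and uses the S_00B81C_*/S_00B800_* packing macros from sid.h, omitted here):

#include <stdbool.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* A wave holds 64 threads on these chips (the patch divides by 64 even
 * on gfx10), so the wave count per threadgroup is the block volume
 * rounded up to a multiple of 64.  This value feeds
 * si_get_compute_resource_limits() above. */
static unsigned waves_per_threadgroup(const unsigned block[3])
{
	return DIV_ROUND_UP(block[0] * block[1] * block[2], 64);
}

/* New in this patch: when any component of pipe_grid_info::last_block
 * is non-zero, the trailing threadgroup in that dimension is smaller
 * than block[i].  The hardware takes the reduced size in
 * NUM_THREAD_PARTIAL and needs PARTIAL_TG_EN set in the dispatch
 * initiator.  A zero component means "no partial group", so the full
 * block size must be written instead of 0. */
static bool compute_partial_block(const unsigned block[3],
				  const unsigned last_block[3],
				  unsigned partial[3])
{
	bool partial_en = last_block[0] || last_block[1] || last_block[2];

	for (int i = 0; i < 3; i++)
		partial[i] = last_block[i] ? last_block[i] : block[i];

	return partial_en; /* caller ORs in S_00B800_PARTIAL_TG_EN(1) */
}

/* LDS_SIZE granularity from si_switch_compute_shader(): the register is
 * programmed in 256-byte blocks on GFX6 and 512-byte blocks on GFX7 and
 * later (align(local_size, gran) >> shift in the original), which is
 * why the comment in that function warns about over-allocating when
 * both the shader and the state tracker declare a few bytes of LDS. */
static unsigned lds_blocks_for_local_size(unsigned local_size, bool is_gfx6)
{
	unsigned granularity = is_gfx6 ? 256 : 512;
	return DIV_ROUND_UP(local_size, granularity);
}

On GFX10 the patch additionally bumps threadgroups_per_cu to 2 when a threadgroup is a single wave, which si_get_compute_resource_limits() encodes as S_00B854_CU_GROUP_COUNT(threadgroups_per_cu - 1) on GFX7 and later.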