/*
* Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
*/
#include "tgsi/tgsi_parse.h"
+#include "util/u_async_debug.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include "amd_kernel_code_t.h"
-#include "radeon/r600_cs.h"
-#include "si_pipe.h"
+#include "si_build_pm4.h"
#include "si_compute.h"
-#include "sid.h"
+
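+/* Log to stderr when the COMPUTE debug flag is set. The do { ... } while (0)
+ * wrapper makes the macro expand to a single statement, so it composes
+ * safely with unbraced if/else. */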
+#define COMPUTE_DBG(rscreen, fmt, args...) \
+ do { \
+ if ((rscreen->debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
+ } while (0)
struct dispatch_packet {
uint16_t header;
struct si_compute *program = (struct si_compute *)job;
struct si_shader *shader = &program->shader;
struct si_shader_selector sel;
- LLVMTargetMachineRef tm;
+ struct si_compiler *compiler;
struct pipe_debug_callback *debug = &program->compiler_ctx_state.debug;
- if (thread_index >= 0) {
- assert(thread_index < ARRAY_SIZE(program->screen->tm));
- tm = program->screen->tm[thread_index];
- if (!debug->async)
- debug = NULL;
- } else {
- tm = program->compiler_ctx_state.tm;
- }
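+ /* Compilation now always runs on a compiler-queue thread; each thread
+  * index owns its own entry in screen->compiler, so compile threads
+  * never share LLVM state. */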
+ assert(!debug->debug_message || debug->async);
+ assert(thread_index >= 0);
+ assert(thread_index < ARRAY_SIZE(program->screen->compiler));
+ compiler = &program->screen->compiler[thread_index];
memset(&sel, 0, sizeof(sel));
sel.screen = program->screen;
- tgsi_scan_shader(program->tokens, &sel.info);
- sel.tokens = program->tokens;
+
+ if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+ tgsi_scan_shader(program->ir.tgsi, &sel.info);
+ sel.tokens = program->ir.tgsi;
+ } else {
+ assert(program->ir_type == PIPE_SHADER_IR_NIR);
+ sel.nir = program->ir.nir;
+
+ si_nir_scan_shader(sel.nir, &sel.info);
+ si_lower_nir(&sel);
+ }
+
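+ /* Both IR paths fill sel.info, which drives the active slot masks and
+  * the bindless-usage flags below. */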
sel.type = PIPE_SHADER_COMPUTE;
sel.local_size = program->local_size;
si_get_active_slot_masks(&sel.info,
program->uses_bindless_samplers = sel.info.uses_bindless_samplers;
program->uses_bindless_images = sel.info.uses_bindless_images;
- if (si_shader_create(program->screen, tm, &program->shader, debug)) {
+ if (si_shader_create(program->screen, compiler, &program->shader, debug)) {
program->shader.compilation_failed = true;
} else {
bool scratch_enabled = shader->config.scratch_bytes_per_wave > 0;
sel.info.properties[TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH] == 0;
}
- FREE(program->tokens);
+ if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ FREE(program->ir.tgsi);
+
program->shader.selector = NULL;
}
program->local_size = cso->req_local_mem;
program->private_size = cso->req_private_mem;
program->input_size = cso->req_input_mem;
- program->use_code_object_v2 = HAVE_LLVM >= 0x0400 &&
- cso->ir_type == PIPE_SHADER_IR_NATIVE;
-
- if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
- program->tokens = tgsi_dup_tokens(cso->prog);
- if (!program->tokens) {
- FREE(program);
- return NULL;
+ program->use_code_object_v2 = cso->ir_type == PIPE_SHADER_IR_NATIVE;
+
+ if (cso->ir_type != PIPE_SHADER_IR_NATIVE) {
+ if (cso->ir_type == PIPE_SHADER_IR_TGSI) {
+ program->ir.tgsi = tgsi_dup_tokens(cso->prog);
+ if (!program->ir.tgsi) {
+ FREE(program);
+ return NULL;
+ }
+ } else {
+ assert(cso->ir_type == PIPE_SHADER_IR_NIR);
+ program->ir.nir = (struct nir_shader *) cso->prog;
}
- program->compiler_ctx_state.tm = sctx->tm;
- program->compiler_ctx_state.debug = sctx->b.debug;
+ program->compiler_ctx_state.debug = sctx->debug;
program->compiler_ctx_state.is_debug_context = sctx->is_debug;
- p_atomic_inc(&sscreen->b.num_shaders_created);
+ p_atomic_inc(&sscreen->num_shaders_created);
util_queue_fence_init(&program->ready);
- if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
- sctx->is_debug ||
- si_can_dump_shader(&sscreen->b, PIPE_SHADER_COMPUTE))
- si_create_compute_state_async(program, -1);
- else
- util_queue_add_job(&sscreen->shader_compiler_queue,
- program, &program->ready,
- si_create_compute_state_async, NULL);
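+ /* Always compile through the queue. When the caller needs the result
+  * synchronously, capture debug messages in a util_async_debug_callback,
+  * wait on the fence, then replay them into the context's callback. */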
+ struct util_async_debug_callback async_debug;
+ bool wait =
+ (sctx->debug.debug_message && !sctx->debug.async) ||
+ sctx->is_debug ||
+ si_can_dump_shader(sscreen, PIPE_SHADER_COMPUTE);
+
+ if (wait) {
+ u_async_debug_init(&async_debug);
+ program->compiler_ctx_state.debug = async_debug.base;
+ }
+
+ util_queue_add_job(&sscreen->shader_compiler_queue,
+ program, &program->ready,
+ si_create_compute_state_async, NULL);
+
+ if (wait) {
+ util_queue_fence_wait(&program->ready);
+ u_async_debug_drain(&async_debug, &sctx->debug);
+ u_async_debug_cleanup(&async_debug);
+ }
} else {
const struct pipe_llvm_program_header *header;
const char *code;
si_shader_binary_read_config(&program->shader.binary,
&program->shader.config, 0);
}
- si_shader_dump(sctx->screen, &program->shader, &sctx->b.debug,
+ si_shader_dump(sctx->screen, &program->shader, &sctx->debug,
PIPE_SHADER_COMPUTE, stderr, true);
if (si_shader_binary_upload(sctx->screen, &program->shader) < 0) {
fprintf(stderr, "LLVM failed to upload shader\n");
return;
/* Wait because we need active slot usage masks. */
- if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ if (program->ir_type != PIPE_SHADER_IR_NATIVE)
util_queue_fence_wait(&program->ready);
si_set_active_descriptors(sctx,
static void si_initialize_compute(struct si_context *sctx)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t bc_va;
radeon_set_sh_reg_seq(cs, R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE0, 2);
radeon_emit(cs, S_00B858_SH0_CU_EN(0xffff) | S_00B858_SH1_CU_EN(0xffff));
radeon_emit(cs, S_00B85C_SH0_CU_EN(0xffff) | S_00B85C_SH1_CU_EN(0xffff));
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
radeon_set_sh_reg_seq(cs,
R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
* kernel if we want to use something other than the default value,
* which is now 0x22f.
*/
- if (sctx->b.chip_class <= SI) {
+ if (sctx->chip_class <= SI) {
/* XXX: This should be:
* (number of compute units) * 4 * (waves per simd) - 1 */
/* Set the pointer to border colors. */
bc_va = sctx->border_color_buffer->gpu_address;
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
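+ /* The 48-bit border-color VA is split across the register pair:
+  * bits [39:8] go in TA_CS_BC_BASE_ADDR, bits [47:40] in the _HI
+  * register (S_030E04_ADDRESS masks the value to the field width). */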
radeon_set_uconfig_reg_seq(cs, R_030E00_TA_CS_BC_BASE_ADDR, 2);
radeon_emit(cs, bc_va >> 8); /* R_030E00_TA_CS_BC_BASE_ADDR */
- radeon_emit(cs, bc_va >> 40); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
+ radeon_emit(cs, S_030E04_ADDRESS(bc_va >> 40)); /* R_030E04_TA_CS_BC_BASE_ADDR_HI */
} else {
- if (sctx->screen->b.info.drm_major == 3 ||
- (sctx->screen->b.info.drm_major == 2 &&
- sctx->screen->b.info.drm_minor >= 48)) {
+ if (sctx->screen->info.si_TA_CS_BC_BASE_ADDR_allowed) {
radeon_set_config_reg(cs, R_00950C_TA_CS_BC_BASE_ADDR,
bc_va >> 8);
}
if (scratch_bo_size < scratch_needed) {
r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
- sctx->compute_scratch_buffer = (struct r600_resource*)
- si_aligned_buffer_create(&sctx->screen->b.b,
- R600_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- scratch_needed, 256);
+ sctx->compute_scratch_buffer =
+ si_aligned_buffer_create(&sctx->screen->b,
+ SI_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed, 256);
if (!sctx->compute_scratch_buffer)
return false;
const amd_kernel_code_t *code_object,
unsigned offset)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_shader_config inline_config = {0};
struct si_shader_config *config;
uint64_t shader_va;
sctx->cs_shader_state.offset == offset)
return true;
- if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+ if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
config = &shader->config;
} else {
unsigned lds_blocks;
* allocated in the shader and 4 bytes allocated by the state
* tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
*/
- if (sctx->b.chip_class <= SI) {
+ if (sctx->chip_class <= SI) {
lds_blocks += align(program->local_size, 256) >> 8;
} else {
lds_blocks += align(program->local_size, 512) >> 9;
config->scratch_bytes_per_wave *
sctx->scratch_waves);
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
shader->scratch_bo, RADEON_USAGE_READWRITE,
RADEON_PRIO_SCRATCH_BUFFER);
}
* command. However, that would add more complexity and we're likely
* to get a shader state change in that case anyway.
*/
- if (sctx->b.chip_class >= CIK) {
+ if (sctx->chip_class >= CIK) {
cik_prefetch_TC_L2_async(sctx, &program->shader.bo->b.b,
0, program->shader.bo->b.b.width0);
}
shader_va += sizeof(amd_kernel_code_t);
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, shader->bo,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, shader->bo,
RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
radeon_set_sh_reg_seq(cs, R_00B830_COMPUTE_PGM_LO, 2);
radeon_emit(cs, shader_va >> 8);
- radeon_emit(cs, shader_va >> 40);
+ radeon_emit(cs, S_00B834_DATA(shader_va >> 40));
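+ /* Same split for the shader VA: COMPUTE_PGM_LO takes bits [39:8],
+  * COMPUTE_PGM_HI takes bits [47:40] via the S_00B834_DATA field. */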
radeon_set_sh_reg_seq(cs, R_00B848_COMPUTE_PGM_RSRC1, 2);
radeon_emit(cs, config->rsrc1);
const amd_kernel_code_t *code_object,
unsigned user_sgpr)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
unsigned max_private_element_size = AMD_HSA_BITS_GET(
S_008F0C_INDEX_STRIDE(3) |
S_008F0C_ADD_TID_ENABLE(1);
- if (sctx->b.chip_class >= GFX9) {
+ if (sctx->chip_class >= GFX9) {
assert(max_private_element_size == 1); /* always 4 bytes on GFX9 */
} else {
scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
- if (sctx->b.chip_class < VI) {
+ if (sctx->chip_class < VI) {
/* BUF_DATA_FORMAT is ignored, but it cannot be
* BUF_DATA_FORMAT_INVALID. */
scratch_dword3 |=
uint64_t kernel_args_va)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
static const enum amd_code_property_mask_t workgroup_count_masks [] = {
AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X,
/* Upload dispatch ptr */
memset(&dispatch, 0, sizeof(dispatch));
- dispatch.workgroup_size_x = info->block[0];
- dispatch.workgroup_size_y = info->block[1];
- dispatch.workgroup_size_z = info->block[2];
+ dispatch.workgroup_size_x = util_cpu_to_le16(info->block[0]);
+ dispatch.workgroup_size_y = util_cpu_to_le16(info->block[1]);
+ dispatch.workgroup_size_z = util_cpu_to_le16(info->block[2]);
- dispatch.grid_size_x = info->grid[0] * info->block[0];
- dispatch.grid_size_y = info->grid[1] * info->block[1];
- dispatch.grid_size_z = info->grid[2] * info->block[2];
+ dispatch.grid_size_x = util_cpu_to_le32(info->grid[0] * info->block[0]);
+ dispatch.grid_size_y = util_cpu_to_le32(info->grid[1] * info->block[1]);
+ dispatch.grid_size_z = util_cpu_to_le32(info->grid[2] * info->block[2]);
- dispatch.private_segment_size = program->private_size;
- dispatch.group_segment_size = program->local_size;
+ dispatch.private_segment_size = util_cpu_to_le32(program->private_size);
+ dispatch.group_segment_size = util_cpu_to_le32(program->local_size);
- dispatch.kernarg_address = kernel_args_va;
+ dispatch.kernarg_address = util_cpu_to_le64(kernel_args_va);
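+ /* The AMD dispatch packet is defined little-endian in memory, so every
+  * field goes through util_cpu_to_le*; these are no-ops on little-endian
+  * hosts. */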
- u_upload_data(sctx->b.b.const_uploader, 0, sizeof(dispatch),
+ u_upload_data(sctx->b.const_uploader, 0, sizeof(dispatch),
256, &dispatch, &dispatch_offset,
(struct pipe_resource**)&dispatch_buf);
fprintf(stderr, "Error: Failed to allocate dispatch "
"packet.");
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, dispatch_buf,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, dispatch_buf,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
dispatch_va = dispatch_buf->gpu_address + dispatch_offset;
const amd_kernel_code_t *code_object,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
struct si_compute *program = sctx->cs_shader_state.program;
struct r600_resource *input_buffer = NULL;
unsigned kernel_args_size;
/* The extra num_work_size_bytes are for work group / work item size information */
kernel_args_size = program->input_size + num_work_size_bytes;
- u_upload_alloc(sctx->b.b.const_uploader, 0, kernel_args_size,
- sctx->screen->b.info.tcc_cache_line_size,
+ u_upload_alloc(sctx->b.const_uploader, 0, kernel_args_size,
+ sctx->screen->info.tcc_cache_line_size,
&kernel_args_offset,
(struct pipe_resource**)&input_buffer, &kernel_args_ptr);
if (!code_object) {
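+ /* TGSI/NIR path: prepend the work-size header to the kernel args.
+  * Words 0-2 hold the grid size in blocks, 3-5 the global size in
+  * threads (grid * block), 6-8 the block size. */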
for (i = 0; i < 3; i++) {
- kernel_args[i] = info->grid[i];
- kernel_args[i + 3] = info->grid[i] * info->block[i];
- kernel_args[i + 6] = info->block[i];
+ kernel_args[i] = util_cpu_to_le32(info->grid[i]);
+ kernel_args[i + 3] = util_cpu_to_le32(info->grid[i] * info->block[i]);
+ kernel_args[i + 6] = util_cpu_to_le32(info->block[i]);
}
}
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, input_buffer,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, input_buffer,
RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
if (code_object) {
const struct pipe_grid_info *info)
{
struct si_compute *program = sctx->cs_shader_state.program;
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
unsigned grid_size_reg = R_00B900_COMPUTE_USER_DATA_0 +
4 * SI_NUM_RESOURCE_SGPRS;
unsigned block_size_reg = grid_size_reg +
uint64_t va = base_va + info->indirect_offset;
int i;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource *)info->indirect,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+ r600_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
for (i = 0; i < 3; ++i) {
static void si_emit_dispatch_packets(struct si_context *sctx,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
- bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
+ struct si_screen *sscreen = sctx->screen;
+ struct radeon_cmdbuf *cs = sctx->gfx_cs;
+ bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
unsigned waves_per_threadgroup =
DIV_ROUND_UP(info->block[0] * info->block[1] * info->block[2], 64);
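+ /* Waves are 64 threads wide on these chips, hence the division by 64. */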
+ unsigned compute_resource_limits =
+ S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0);
+
+ if (sctx->chip_class >= CIK) {
+ unsigned num_cu_per_se = sscreen->info.num_good_compute_units /
+ sscreen->info.max_se;
+
+ /* Force even distribution on all SIMDs in CU if the workgroup
+ * size is 64. This has shown some good improvements if # of CUs
+ * per SE is not a multiple of 4.
+ */
+ if (num_cu_per_se % 4 && waves_per_threadgroup == 1)
+ compute_resource_limits |= S_00B854_FORCE_SIMD_DIST(1);
+ }
radeon_set_sh_reg(cs, R_00B854_COMPUTE_RESOURCE_LIMITS,
- S_00B854_SIMD_DEST_CNTL(waves_per_threadgroup % 4 == 0));
+ compute_resource_limits);
radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
radeon_emit(cs, S_00B81C_NUM_THREAD_FULL(info->block[0]));
S_00B800_FORCE_START_AT_000(1) |
/* If the KMD allows it (there is a KMD hw register for it),
* allow launching waves out-of-order. (same as Vulkan) */
- S_00B800_ORDER_MODE(sctx->b.chip_class >= CIK);
+ S_00B800_ORDER_MODE(sctx->chip_class >= CIK);
if (info->indirect) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- (struct r600_resource *)info->indirect,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
+ r600_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
* Only SI and certain CIK chips are affected.
*/
bool cs_regalloc_hang =
- (sctx->b.chip_class == SI ||
- sctx->b.family == CHIP_BONAIRE ||
- sctx->b.family == CHIP_KABINI) &&
+ (sctx->chip_class == SI ||
+ sctx->family == CHIP_BONAIRE ||
+ sctx->family == CHIP_KABINI) &&
info->block[0] * info->block[1] * info->block[2] > 256;
if (cs_regalloc_hang)
- sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
SI_CONTEXT_CS_PARTIAL_FLUSH;
- if (program->ir_type == PIPE_SHADER_IR_TGSI &&
+ if (program->ir_type != PIPE_SHADER_IR_NATIVE &&
program->shader.compilation_failed)
return;
- if (sctx->b.last_num_draw_calls != sctx->b.num_draw_calls) {
+ if (sctx->last_num_draw_calls != sctx->num_draw_calls) {
si_update_fb_dirtiness_after_rendering(sctx);
- sctx->b.last_num_draw_calls = sctx->b.num_draw_calls;
+ sctx->last_num_draw_calls = sctx->num_draw_calls;
}
- si_decompress_compute_textures(sctx);
+ si_decompress_textures(sctx, 1 << PIPE_SHADER_COMPUTE);
/* Add buffer sizes for memory checking in need_cs_space. */
- r600_context_add_resource_size(ctx, &program->shader.bo->b.b);
+ si_context_add_resource_size(sctx, &program->shader.bo->b.b);
/* TODO: add the scratch buffer */
if (info->indirect) {
- r600_context_add_resource_size(ctx, info->indirect);
+ si_context_add_resource_size(sctx, info->indirect);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
- if (sctx->b.chip_class <= VI &&
+ if (sctx->chip_class <= VI &&
r600_resource(info->indirect)->TC_L2_dirty) {
- sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
r600_resource(info->indirect)->TC_L2_dirty = false;
}
}
- si_need_cs_space(sctx);
-
- if (sctx->b.log)
- si_log_compute_state(sctx, sctx->b.log);
+ si_need_gfx_cs_space(sctx);
if (!sctx->cs_shader_state.initialized)
si_initialize_compute(sctx);
- if (sctx->b.flags)
+ if (sctx->flags)
si_emit_cache_flush(sctx);
if (!si_switch_compute_shader(sctx, program, &program->shader,
si_upload_compute_shader_descriptors(sctx);
si_emit_compute_shader_pointers(sctx);
- if (si_is_atom_dirty(sctx, sctx->atoms.s.render_cond)) {
- sctx->atoms.s.render_cond->emit(&sctx->b,
- sctx->atoms.s.render_cond);
- si_set_atom_dirty(sctx, sctx->atoms.s.render_cond, false);
+ if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond)) {
+ sctx->atoms.s.render_cond.emit(sctx);
+ si_set_atom_dirty(sctx, &sctx->atoms.s.render_cond, false);
}
if ((program->input_size ||
/* Global buffers */
for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
struct r600_resource *buffer =
- (struct r600_resource*)program->global_buffers[i];
+ r600_resource(program->global_buffers[i]);
if (!buffer) {
continue;
}
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buffer,
+ radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buffer,
RADEON_USAGE_READWRITE,
RADEON_PRIO_COMPUTE_GLOBAL);
}
- if (program->ir_type == PIPE_SHADER_IR_TGSI)
+ if (program->ir_type != PIPE_SHADER_IR_NATIVE)
si_setup_tgsi_grid(sctx, info);
si_emit_dispatch_packets(sctx, info);
- if (unlikely(sctx->current_saved_cs))
+ if (unlikely(sctx->current_saved_cs)) {
si_trace_emit(sctx);
+ si_log_compute_state(sctx, sctx->log);
+ }
sctx->compute_is_busy = true;
- sctx->b.num_compute_calls++;
+ sctx->num_compute_calls++;
if (sctx->cs_shader_state.uses_scratch)
- sctx->b.num_spill_compute_calls++;
+ sctx->num_spill_compute_calls++;
if (cs_regalloc_hang)
- sctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
+ sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
}
void si_destroy_compute(struct si_compute *program)
{
- if (program->ir_type == PIPE_SHADER_IR_TGSI) {
+ if (program->ir_type != PIPE_SHADER_IR_NATIVE) {
util_queue_drop_job(&program->screen->shader_compiler_queue,
&program->ready);
util_queue_fence_destroy(&program->ready);
void si_init_compute_functions(struct si_context *sctx)
{
- sctx->b.b.create_compute_state = si_create_compute_state;
- sctx->b.b.delete_compute_state = si_delete_compute_state;
- sctx->b.b.bind_compute_state = si_bind_compute_state;
+ sctx->b.create_compute_state = si_create_compute_state;
+ sctx->b.delete_compute_state = si_delete_compute_state;
+ sctx->b.bind_compute_state = si_bind_compute_state;
/* ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; */
- sctx->b.b.set_compute_resources = si_set_compute_resources;
- sctx->b.b.set_global_binding = si_set_global_binding;
- sctx->b.b.launch_grid = si_launch_grid;
+ sctx->b.set_compute_resources = si_set_compute_resources;
+ sctx->b.set_global_binding = si_set_global_binding;
+ sctx->b.launch_grid = si_launch_grid;
}