*/
+#ifdef HAVE_OPENCL
+static void radeon_shader_binary_init(struct r600_shader_binary *b)
+{
+ memset(b, 0, sizeof(*b));
+}
+
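+/* Free the buffers owned by the binary; the struct itself is not freed. */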
+static void radeon_shader_binary_clean(struct r600_shader_binary *b)
+{
+ if (!b)
+ return;
+ FREE(b->code);
+ FREE(b->config);
+ FREE(b->rodata);
+ FREE(b->global_symbol_offsets);
+ FREE(b->relocs);
+ FREE(b->disasm_string);
+}
+#endif
+
struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
unsigned size)
{
rat_templ.u.tex.first_layer = 0;
rat_templ.u.tex.last_layer = 0;
- /* Add the RAT the list of color buffers */
+ /* Add the RAT to the list of color buffers. Drop the old surface first so the previous reference is not leaked. */
+ pipe_surface_reference(&pipe->ctx->framebuffer.state.cbufs[id], NULL);
pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
(struct pipe_context *)pipe->ctx,
(struct pipe_resource *)bo, &rat_templ);
#ifdef HAVE_OPENCL
static void parse_symbol_table(Elf_Data *symbol_table_data,
const GElf_Shdr *symbol_table_header,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
GElf_Sym symbol;
unsigned i = 0;
static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
unsigned symbol_sh_link,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
unsigned i;
return;
}
binary->relocs = CALLOC(binary->reloc_count,
- sizeof(struct ac_shader_reloc));
+ sizeof(struct r600_shader_reloc));
for (i = 0; i < binary->reloc_count; i++) {
GElf_Sym symbol;
GElf_Rel rel;
char *symbol_name;
- struct ac_shader_reloc *reloc = &binary->relocs[i];
+ struct r600_shader_reloc *reloc = &binary->relocs[i];
gelf_getrel(relocs, i, &rel);
gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &symbol);
}
static void r600_elf_read(const char *elf_data, unsigned elf_size,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
char *elf_buffer;
Elf *elf;
}
static const unsigned char *r600_shader_binary_config_start(
- const struct ac_shader_binary *binary,
+ const struct r600_shader_binary *binary,
uint64_t symbol_offset)
{
unsigned i;
return binary->config;
}
-static void r600_shader_binary_read_config(const struct ac_shader_binary *binary,
+static void r600_shader_binary_read_config(const struct r600_shader_binary *binary,
struct r600_bytecode *bc,
uint64_t symbol_offset,
boolean *use_kill)
}
static unsigned r600_create_shader(struct r600_bytecode *bc,
- const struct ac_shader_binary *binary,
+ const struct r600_shader_binary *binary,
boolean *use_kill)
{
/* Upload code + ROdata */
shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
shader->bc.ndw * 4);
- p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+ p = r600_buffer_map_sync_with_rings(
+ &rctx->b, shader->code_bo,
+ PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
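+ /* RADEON_TRANSFER_TEMPORARY hints that the mapping is short-lived; the buffer is unmapped right after the copy below. */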
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
rctx->b.ws->buffer_unmap(shader->code_bo->buf);
} else {
#ifdef HAVE_OPENCL
radeon_shader_binary_clean(&shader->binary);
- pipe_resource_reference(&shader->code_bo, NULL);
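+ /* Both buffers are r600_resources, so cast to the pipe_resource base type when dropping the references. */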
+ pipe_resource_reference((struct pipe_resource**)&shader->code_bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&shader->kernel_param, NULL);
#endif
r600_destroy_shader(&shader->bc);
}
uint32_t indirect_grid[3])
{
int i;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
unsigned num_waves;
static void compute_setup_cbs(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned i;
/* Emit colorbuffers. */
static void compute_emit_cs(struct r600_context *rctx,
const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
bool compute_dirty = false;
struct r600_pipe_shader *current;
struct r600_shader_atomic combined_atomics[8];
rctx->cmd_buf_is_compute = true;
}
- r600_need_cs_space(rctx, 0, true);
if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
current = rctx->cs_shader_state.shader->sel->current;
}
rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
+
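+ /* Count the atomic buffers first; the CS space reservation below depends on how many are used. */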
+ evergreen_emit_atomic_buffer_setup_count(rctx, current, combined_atomics, &atomic_used_mask);
+ r600_need_cs_space(rctx, 0, true, util_bitcount(atomic_used_mask));
+
if (need_buf_const) {
eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
}
r600_update_driver_const_buffers(rctx, true);
- if (evergreen_emit_atomic_buffer_setup(rctx, current, combined_atomics, &atomic_used_mask)) {
+ evergreen_emit_atomic_buffer_setup(rctx, true, combined_atomics, atomic_used_mask);
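+ /* Only emit the CS partial flush when atomic counters are actually in use. */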
+ if (atomic_used_mask) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
- }
+ } else
+ r600_need_cs_space(rctx, 0, true, 0);
/* Initialize all the compute-related registers.
*
struct r600_cs_shader_state *state =
(struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint64_t va;
struct r600_resource *code_bo;
unsigned ngpr, nstack;