* Adam Rak <adam.rak@streamnovation.com>
*/
+#ifdef HAVE_OPENCL
#include <gelf.h>
#include <libelf.h>
+#endif
#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "util/u_memory.h"
#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
+#include "tgsi/tgsi_parse.h"
#include "pipebuffer/pb_buffer.h"
#include "evergreend.h"
#include "r600_shader.h"
*/
+#ifdef HAVE_OPENCL
+static void radeon_shader_binary_init(struct r600_shader_binary *b)
+{
+ memset(b, 0, sizeof(*b));
+}
+
+static void radeon_shader_binary_clean(struct r600_shader_binary *b)
+{
+ if (!b)
+ return;
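+ /* FREE() accepts NULL, so members that were never allocated are safe to pass. */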
+ FREE(b->code);
+ FREE(b->config);
+ FREE(b->rodata);
+ FREE(b->global_symbol_offsets);
+ FREE(b->relocs);
+ FREE(b->disasm_string);
+}
+#endif
+
struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
unsigned size)
{
rat_templ.u.tex.first_layer = 0;
rat_templ.u.tex.last_layer = 0;
- /* Add the RAT the list of color buffers */
+ /* Add the RAT to the list of color buffers. Drop the old surface first. */
+ pipe_surface_reference(&pipe->ctx->framebuffer.state.cbufs[id], NULL);
pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
(struct pipe_context *)pipe->ctx,
(struct pipe_resource *)bo, &rat_templ);
#ifdef HAVE_OPENCL
static void parse_symbol_table(Elf_Data *symbol_table_data,
const GElf_Shdr *symbol_table_header,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
GElf_Sym symbol;
unsigned i = 0;
static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
unsigned symbol_sh_link,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
unsigned i;
return;
}
binary->relocs = CALLOC(binary->reloc_count,
- sizeof(struct ac_shader_reloc));
+ sizeof(struct r600_shader_reloc));
for (i = 0; i < binary->reloc_count; i++) {
GElf_Sym symbol;
GElf_Rel rel;
char *symbol_name;
- struct ac_shader_reloc *reloc = &binary->relocs[i];
+ struct r600_shader_reloc *reloc = &binary->relocs[i];
gelf_getrel(relocs, i, &rel);
gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &symbol);
}
static void r600_elf_read(const char *elf_data, unsigned elf_size,
- struct ac_shader_binary *binary)
+ struct r600_shader_binary *binary)
{
char *elf_buffer;
Elf *elf;
}
static const unsigned char *r600_shader_binary_config_start(
- const struct ac_shader_binary *binary,
+ const struct r600_shader_binary *binary,
uint64_t symbol_offset)
{
unsigned i;
return binary->config;
}
-static void r600_shader_binary_read_config(const struct ac_shader_binary *binary,
+static void r600_shader_binary_read_config(const struct r600_shader_binary *binary,
struct r600_bytecode *bc,
uint64_t symbol_offset,
boolean *use_kill)
}
static unsigned r600_create_shader(struct r600_bytecode *bc,
- const struct ac_shader_binary *binary,
+ const struct r600_shader_binary *binary,
boolean *use_kill)
{
const char *code;
void *p;
boolean use_kill;
+#endif
+ shader->ctx = rctx;
+ shader->local_size = cso->req_local_mem;
+ shader->private_size = cso->req_private_mem;
+ shader->input_size = cso->req_input_mem;
+
+ shader->ir_type = cso->ir_type;
+
+ if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+ shader->sel = r600_create_shader_state_tokens(ctx, cso->prog, PIPE_SHADER_COMPUTE);
+ return shader;
+ }
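+ /* Otherwise continue with the native (OpenCL) binary path below. */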
+#ifdef HAVE_OPENCL
COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
header = cso->prog;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
/* Upload code + ROdata */
shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
shader->bc.ndw * 4);
- p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+ p = r600_buffer_map_sync_with_rings(
+ &rctx->b, shader->code_bo,
+ PIPE_TRANSFER_WRITE | RADEON_TRANSFER_TEMPORARY);
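+ /* The buffer is mapped only for this copy and unmapped right below, hence TEMPORARY. */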
//TODO: use util_memcpy_cpu_to_le32 ?
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
rctx->b.ws->buffer_unmap(shader->code_bo->buf);
#endif
- shader->ctx = rctx;
- shader->local_size = cso->req_local_mem;
- shader->private_size = cso->req_private_mem;
- shader->input_size = cso->req_input_mem;
-
return shader;
}
if (!shader)
return;
+ if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+ r600_delete_shader_selector(ctx, shader->sel);
+ } else {
#ifdef HAVE_OPENCL
- radeon_shader_binary_clean(&shader->binary);
+ radeon_shader_binary_clean(&shader->binary);
+ pipe_resource_reference((struct pipe_resource**)&shader->code_bo, NULL);
+ pipe_resource_reference((struct pipe_resource**)&shader->kernel_param, NULL);
#endif
- r600_destroy_shader(&shader->bc);
-
- /* TODO destroy shader->code_bo, shader->const_bo
- * we'll need something like r600_buffer_free */
+ r600_destroy_shader(&shader->bc);
+ }
FREE(shader);
}
static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
{
struct r600_context *rctx = (struct r600_context *)ctx;
-
+ struct r600_pipe_compute *cstate = (struct r600_pipe_compute *)state;
COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n");
+ if (!state) {
+ rctx->cs_shader_state.shader = NULL;
+ return;
+ }
+
+ if (cstate->ir_type == PIPE_SHADER_IR_TGSI) {
+ bool compute_dirty;
+
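+ /* Pre-select the TGSI variant at bind time; dispatch re-checks it. */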
+ r600_shader_select(ctx, cstate->sel, &compute_dirty);
+ }
+
rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* We need to reserve 9 dwords (36 bytes) for the implicit kernel
 * parameters: number of work groups, global size and local size
 * (3 dwords each).
 */
- unsigned input_size = shader->input_size + 36;
+ unsigned input_size;
uint32_t *num_work_groups_start;
uint32_t *global_size_start;
uint32_t *local_size_start;
struct pipe_box box;
struct pipe_transfer *transfer = NULL;
+ if (!shader)
+ return;
if (shader->input_size == 0) {
return;
}
-
+ input_size = shader->input_size + 36;
if (!shader->kernel_param) {
/* Add space for the grid dimensions */
shader->kernel_param = (struct r600_resource *)
}
static void evergreen_emit_dispatch(struct r600_context *rctx,
- const struct pipe_grid_info *info)
+ const struct pipe_grid_info *info,
+ uint32_t indirect_grid[3])
{
int i;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+ bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
unsigned num_waves;
unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
unsigned wave_divisor = (16 * num_pipes);
int group_size = 1;
int grid_size = 1;
- unsigned lds_size = shader->local_size / 4 +
- shader->bc.nlds_dw;
+ unsigned lds_size = shader->local_size / 4;
+ if (shader->ir_type != PIPE_SHADER_IR_TGSI)
+ lds_size += shader->bc.nlds_dw;
/* Calculate group_size/grid_size */
for (i = 0; i < 3; i++) {
radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC,
lds_size | (num_waves << 14));
- /* Dispatch packet */
- radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
- radeon_emit(cs, info->grid[0]);
- radeon_emit(cs, info->grid[1]);
- radeon_emit(cs, info->grid[2]);
- /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
- radeon_emit(cs, 1);
+ if (info->indirect) {
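+ /* The indirect grid was read back on the CPU in compute_emit_cs,
+ * so this is still a direct dispatch packet. */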
+ radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
+ radeon_emit(cs, indirect_grid[0]);
+ radeon_emit(cs, indirect_grid[1]);
+ radeon_emit(cs, indirect_grid[2]);
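+ /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */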
+ radeon_emit(cs, 1);
+ } else {
+ /* Dispatch packet */
+ radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, render_cond_bit));
+ radeon_emit(cs, info->grid[0]);
+ radeon_emit(cs, info->grid[1]);
+ radeon_emit(cs, info->grid[2]);
+ /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
+ radeon_emit(cs, 1);
+ }
if (rctx->is_debug)
eg_trace_emit(rctx);
}
-static void compute_emit_cs(struct r600_context *rctx,
- const struct pipe_grid_info *info)
+static void compute_setup_cbs(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned i;
- /* make sure that the gfx ring is only one active */
- if (radeon_emitted(rctx->b.dma.cs, 0)) {
- rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
- }
-
- /* Initialize all the compute-related registers.
- *
- * See evergreen_init_atom_start_compute_cs() in this file for the list
- * of registers initialized by the start_compute_cs_cmd atom.
- */
- r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
-
- /* emit config state */
- if (rctx->b.chip_class == EVERGREEN)
- r600_emit_atom(rctx, &rctx->config_state.atom);
-
- rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
- r600_flush_emit(rctx);
-
/* Emit colorbuffers. */
/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
for (i = 0; i < 8 && i < rctx->framebuffer.state.nr_cbufs; i++) {
/* Set CB_TARGET_MASK XXX: Use cb_misc_state */
radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
- rctx->compute_cb_target_mask);
+ rctx->compute_cb_target_mask);
+}
+
+static void compute_emit_cs(struct r600_context *rctx,
+ const struct pipe_grid_info *info)
+{
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+ bool compute_dirty = false;
+ struct r600_pipe_shader *current;
+ struct r600_shader_atomic combined_atomics[8];
+ uint8_t atomic_used_mask;
+ uint32_t indirect_grid[3] = { 0, 0, 0 };
+ /* make sure that the gfx ring is only one active */
+ if (radeon_emitted(rctx->b.dma.cs, 0)) {
+ rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ }
+
+ r600_update_compressed_resource_state(rctx, true);
- /* Emit vertex buffer state */
- rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask);
- r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom);
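+ /* Don't mix gfx and compute in one command buffer: flush gfx first. */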
+ if (!rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = true;
+ }
+
+ if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
+ r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
+ current = rctx->cs_shader_state.shader->sel->current;
+ if (compute_dirty) {
+ rctx->cs_shader_state.atom.num_dw = current->command_buffer.num_dw;
+ r600_context_add_resource_size(&rctx->b.b, (struct pipe_resource *)current->bo);
+ r600_set_atom_dirty(rctx, &rctx->cs_shader_state.atom, true);
+ }
+
+ bool need_buf_const = current->shader.uses_tex_buffers ||
+ current->shader.has_txq_cube_array_z_comp;
+
+ if (info->indirect) {
+ struct r600_resource *indirect_resource = (struct r600_resource *)info->indirect;
+ unsigned *data = r600_buffer_map_sync_with_rings(&rctx->b, indirect_resource, PIPE_TRANSFER_READ);
+ unsigned offset = info->indirect_offset / 4;
+ indirect_grid[0] = data[offset];
+ indirect_grid[1] = data[offset + 1];
+ indirect_grid[2] = data[offset + 2];
+ }
+ for (int i = 0; i < 3; i++) {
+ rctx->cs_block_grid_sizes[i] = info->block[i];
+ rctx->cs_block_grid_sizes[i + 4] = info->indirect ? indirect_grid[i] : info->grid[i];
+ }
+ rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
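+ /* Slots 3 and 7 pad each triplet to a vec4 in the driver const buffer. */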
+ rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
+
+ evergreen_emit_atomic_buffer_setup_count(rctx, current, combined_atomics, &atomic_used_mask);
+ r600_need_cs_space(rctx, 0, true, util_bitcount(atomic_used_mask));
+
+ if (need_buf_const) {
+ eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
+ }
+ r600_update_driver_const_buffers(rctx, true);
+
+ evergreen_emit_atomic_buffer_setup(rctx, true, combined_atomics, atomic_used_mask);
+ if (atomic_used_mask) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+ } else
+ r600_need_cs_space(rctx, 0, true, 0);
+
+ /* Initialize all the compute-related registers.
+ *
+ * See evergreen_init_atom_start_compute_cs() in this file for the list
+ * of registers initialized by the start_compute_cs_cmd atom.
+ */
+ r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
+
+ /* emit config state */
+ if (rctx->b.chip_class == EVERGREEN) {
+ if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
+ radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
+ radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
+ radeon_emit(cs, 0);
+ radeon_emit(cs, 0);
+ radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));
+ } else
+ r600_emit_atom(rctx, &rctx->config_state.atom);
+ }
+
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ r600_flush_emit(rctx);
+
+ if (rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_TGSI) {
+
+ compute_setup_cbs(rctx);
+
+ /* Emit vertex buffer state */
+ rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask);
+ r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom);
+ } else {
+ uint32_t rat_mask;
+
+ rat_mask = evergreen_construct_rat_mask(rctx, &rctx->cb_misc_state, 0);
+ radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
+ rat_mask);
+ }
+
+ r600_emit_atom(rctx, &rctx->b.render_cond_atom);
/* Emit constant buffer state */
r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
/* Emit sampler view (texture resource) state */
r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom);
- /* Emit compute shader state */
+ /* Emit images state */
+ r600_emit_atom(rctx, &rctx->compute_images.atom);
+
+ /* Emit buffers state */
+ r600_emit_atom(rctx, &rctx->compute_buffers.atom);
+
+ /* Emit shader state */
r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
/* Emit dispatch state and dispatch packet */
- evergreen_emit_dispatch(rctx, info);
+ evergreen_emit_dispatch(rctx, info, indirect_grid);
/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
*/
radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
radeon_emit(cs, 0);
}
+ if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI)
+ evergreen_emit_atomic_buffer_save(rctx, true, combined_atomics, &atomic_used_mask);
#if 0
COMPUTE_DBG(rctx->screen, "cdw: %i\n", cs->cdw);
struct r600_cs_shader_state *state =
(struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
uint64_t va;
struct r600_resource *code_bo;
unsigned ngpr, nstack;
- code_bo = shader->code_bo;
- va = shader->code_bo->gpu_address + state->pc;
- ngpr = shader->bc.ngpr;
- nstack = shader->bc.nstack;
+ if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+ code_bo = shader->sel->current->bo;
+ va = shader->sel->current->bo->gpu_address;
+ ngpr = shader->sel->current->shader.bc.ngpr;
+ nstack = shader->sel->current->shader.bc.nstack;
+ } else {
+ code_bo = shader->code_bo;
+ va = shader->code_bo->gpu_address + state->pc;
+ ngpr = shader->bc.ngpr;
+ nstack = shader->bc.nstack;
+ }
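+ /* SQ_PGM_RESOURCES_LS below now also sets DX10_CLAMP, matching the
+ * other shader stages (assumption based on the 3D paths). */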
radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
- S_0288D4_NUM_GPRS(ngpr)
- | S_0288D4_STACK_SIZE(nstack));
+ S_0288D4_NUM_GPRS(ngpr) |
+ S_0288D4_DX10_CLAMP(1) |
+ S_0288D4_STACK_SIZE(nstack));
radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
boolean use_kill;
- rctx->cs_shader_state.pc = info->pc;
- /* Get the config information for this kernel. */
- r600_shader_binary_read_config(&shader->binary, &shader->bc,
- info->pc, &use_kill);
+ if (shader->ir_type != PIPE_SHADER_IR_TGSI) {
+ rctx->cs_shader_state.pc = info->pc;
+ /* Get the config information for this kernel. */
+ r600_shader_binary_read_config(&shader->binary, &shader->bc,
+ info->pc, &use_kill);
+ } else {
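+ /* TGSI variants get their config from the compiler, not from an ELF binary. */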
+ use_kill = false;
+ rctx->cs_shader_state.pc = 0;
+ }
#endif
COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc);
r600_init_command_buffer(cb, 256);
cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
- /* This must be first. */
- r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
- r600_store_value(cb, 0x80000000);
- r600_store_value(cb, 0x80000000);
-
/* We're setting config registers here. */
r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
break;
}
- /* Config Registers */
- if (rctx->b.chip_class < CAYMAN)
- evergreen_init_common_regs(rctx, cb, rctx->b.chip_class, rctx->b.family,
- rctx->screen->b.info.drm_minor);
- else
- cayman_init_common_regs(cb, rctx->b.chip_class, rctx->b.family,
- rctx->screen->b.info.drm_minor);
-
/* The primitive type always needs to be POINTLIST for compute. */
r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
V_008958_DI_PT_POINTLIST);