unsigned offset,
struct pipe_resource * buffer)
{
- struct pipe_vertex_buffer *vb = &rctx->cs_vertex_buffer[vb_index];
+ struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
+ struct pipe_vertex_buffer *vb = &state->vb[vb_index];
vb->stride = 1;
vb->buffer_offset = offset;
vb->buffer = buffer;
vb->user_buffer = NULL;
- r600_inval_vertex_cache(rctx);
- r600_atom_dirty(rctx, &rctx->cs_vertex_buffer_state);
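+ /* Chips without a vertex cache fetch vertex data through the texture
+ * cache, so flush whichever cache the hardware actually uses. */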
+ rctx->flags |= rctx->has_vertex_cache ? R600_CONTEXT_VTX_FLUSH : R600_CONTEXT_TEX_FLUSH;
+ state->enabled_mask |= 1 << vb_index;
+ state->dirty_mask |= 1 << vb_index;
+ r600_atom_dirty(rctx, &state->atom);
}
const struct u_resource_vtbl r600_global_buffer_vtbl =
const const struct pipe_compute_state *cso)
{
struct r600_context *ctx = (struct r600_context *)ctx_;
+ struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
+ void *p;
#ifdef HAVE_OPENCL
const struct pipe_llvm_program_header * header;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
#endif
- if (!ctx->screen->screen.get_param(&ctx->screen->screen,
- PIPE_CAP_COMPUTE)) {
- fprintf(stderr, "Compute is not supported\n");
- return NULL;
- }
- struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
-
shader->ctx = (struct r600_context*)ctx;
shader->resources = (struct evergreen_compute_resource*)
CALLOC(sizeof(struct evergreen_compute_resource),
r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
#endif
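+ /* Upload the compiled bytecode to a VRAM buffer; its GPU address is
+ * later programmed into SQ_PGM_START_LS by evergreen_emit_cs_shader(). */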
+ shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
+ shader->bc.ndw * 4);
+
+ p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
+ PIPE_TRANSFER_WRITE);
+
+ memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
+ ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);
return shader;
}
COMPUTE_DBG("*** evergreen_bind_compute_state\n");
- ctx->cs_shader = (struct r600_pipe_compute *)state;
-
- if (!ctx->cs_shader->shader_code_bo) {
-
- ctx->cs_shader->shader_code_bo =
- r600_compute_buffer_alloc_vram(ctx->screen,
- ctx->cs_shader->bc.ndw * 4);
-
- void *p = ctx->ws->buffer_map(
- ctx->cs_shader->shader_code_bo->cs_buf,
- ctx->cs, PIPE_TRANSFER_WRITE);
-
- memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4);
-
- ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf);
-
- }
-
- struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader,
- COMPUTE_RESOURCE_SHADER, 0);
-
- if (ctx->chip_class < CAYMAN) {
- evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3,
- S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr));
- }
-
- ///maybe we can use it later
- evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0);
- ///maybe we can use it later
- evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0);
-
- evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS,
- S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr)
- | S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack));
- evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0);
-
- evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0);
- res->bo = ctx->cs_shader->shader_code_bo;
- res->usage = RADEON_USAGE_READ;
- res->coher_bo_size = ctx->cs_shader->bc.ndw*4;
-
- r600_inval_shader_cache(ctx);
-
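+ /* The shader's registers are emitted later by the cs_shader_state
+ * atom (see evergreen_emit_cs_shader()). */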
+ ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored a vtx buffer (ID=0), besides the explicit
const void *input)
{
struct r600_context *ctx = (struct r600_context *)ctx_;
+ struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
int i;
unsigned kernel_parameters_offset_bytes = 36;
uint32_t * num_work_groups_start;
uint32_t * local_size_start;
uint32_t * kernel_parameters_start;
- if (ctx->cs_shader->input_size == 0) {
+ if (shader->input_size == 0) {
return;
}
- if (!ctx->cs_shader->kernel_param) {
- unsigned buffer_size = ctx->cs_shader->input_size;
+ if (!shader->kernel_param) {
+ unsigned buffer_size = shader->input_size;
/* Add space for the grid dimensions */
buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
- ctx->cs_shader->kernel_param =
- r600_compute_buffer_alloc_vram(ctx->screen,
- buffer_size);
+ shader->kernel_param = r600_compute_buffer_alloc_vram(
+ ctx->screen, buffer_size);
}
num_work_groups_start = ctx->ws->buffer_map(
- ctx->cs_shader->kernel_param->cs_buf,
- ctx->cs, PIPE_TRANSFER_WRITE);
+ shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
memcpy(local_size_start, block_layout, 3 * sizeof(uint));
/* Copy the kernel inputs */
- memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size);
+ memcpy(kernel_parameters_start, input, shader->input_size);
for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
- (ctx->cs_shader->input_size / 4); i++) {
+ (shader->input_size / 4); i++) {
COMPUTE_DBG("input %i : %i\n", i,
((unsigned*)num_work_groups_start)[i]);
}
- ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf);
+ ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
///ID=0 is reserved for the parameters
evergreen_cs_set_vertex_buffer(ctx, 0, 0,
- (struct pipe_resource*)ctx->cs_shader->kernel_param);
+ (struct pipe_resource*)shader->kernel_param);
///ID=0 is reserved for parameters
- evergreen_set_const_cache(ctx->cs_shader, 0,
- ctx->cs_shader->kernel_param, ctx->cs_shader->input_size, 0);
+ evergreen_set_const_cache(shader, 0, shader->kernel_param,
+ shader->input_size, 0);
}
-void evergreen_direct_dispatch(
- struct pipe_context *ctx_,
+static void evergreen_emit_direct_dispatch(
+ struct r600_context *rctx,
const uint *block_layout, const uint *grid_layout)
{
- /* This struct r600_context* must be called rctx, because the
- * r600_pipe_state_add_reg macro assumes there is a local variable
- * of type struct r600_context* called rctx.
- */
- struct r600_context *rctx = (struct r600_context *)ctx_;
-
int i;
-
- struct evergreen_compute_resource* res = get_empty_res(rctx->cs_shader,
- COMPUTE_RESOURCE_DISPATCH, 0);
-
- /* Set CB_TARGET_MASK */
- evergreen_reg_set(res, R_028238_CB_TARGET_MASK, rctx->compute_cb_target_mask);
-
- evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);
-
- evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
- evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
- evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);
-
- evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
- evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
- evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);
-
+ struct radeon_winsys_cs *cs = rctx->cs;
+ unsigned num_waves;
+ unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+ unsigned wave_divisor = (16 * num_pipes);
int group_size = 1;
-
int grid_size = 1;
+ /* XXX: Enable lds and get size from cs_shader_state */
+ unsigned lds_size = 0;
+ /* Calculate group_size/grid_size */
for (i = 0; i < 3; i++) {
group_size *= block_layout[i];
}

for (i = 0; i < 3; i++) {
grid_size *= grid_layout[i];
}
- evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
- evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);
+ /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
+ num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
+ wave_divisor - 1) / wave_divisor;
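+ /* e.g. a 16x16x1 block on a 2-pipe chip: 256 threads / (16 * 2) = 8 waves */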
+
+ COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
+ num_pipes, num_waves);
+
+ /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
+ * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
+ * We may need to allocate the entire LDS space for Compute Shaders.
+ *
+ * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
+ * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
+ */
+
+ r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
- evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
- evergreen_emit_raw_value(res, grid_layout[0]);
- evergreen_emit_raw_value(res, grid_layout[1]);
- evergreen_emit_raw_value(res, grid_layout[2]);
- ///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
- evergreen_emit_raw_value(res, 1);
+ r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+ r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+ r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+ r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+
+ r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+ group_size);
+
+ r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+ r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+ r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+ r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+
+ r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+ lds_size | (num_waves << 14));
+
+ /* Dispatch packet */
+ r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+ r600_write_value(cs, grid_layout[0]);
+ r600_write_value(cs, grid_layout[1]);
+ r600_write_value(cs, grid_layout[2]);
+ /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
+ r600_write_value(cs, 1);
}
-static void compute_emit_cs(struct r600_context *ctx)
+static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
+ const uint *grid_layout)
{
struct radeon_winsys_cs *cs = ctx->cs;
int i;
struct r600_resource *onebo = NULL;
struct r600_pipe_state *cb_state;
+ struct evergreen_compute_resource *resources =
+ ctx->cs_shader_state.shader->resources;
- /* Initialize all the registers common to both 3D and compute. Some
- * 3D only register will be initialized by this atom as well, but
- * this is OK for now.
- *
- * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
- * evergreen_state.c for the list of registers that are intialized by
- * the start_cs_cmd atom.
- */
- r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);
-
- /* Initialize all the compute specific registers.
+ /* Initialize all the compute-related registers.
*
* See evergreen_init_atom_start_compute_cs() in this file for the list
- * of registers initialized by the start_compuet_cs_cmd atom.
+ * of registers initialized by the start_compute_cs_cmd atom.
*/
r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);
+ ctx->flags |= R600_CONTEXT_CB_FLUSH;
+ r600_flush_emit(ctx);
+
/* Emit cb_state */
- cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
+ cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);
+ /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
+ r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
+ ctx->compute_cb_target_mask);
+
/* Emit vertex buffer state */
- ctx->cs_vertex_buffer_state.num_dw = 12 * ctx->nr_cs_vertex_buffers;
- r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state);
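+ /* Each enabled vertex buffer takes 12 dwords: the SET_RESOURCE
+ * packet (10) plus its NOP relocation (2). */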
+ ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
+ r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
+
+ /* Emit compute shader state */
+ r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
for (i = 0; i < get_compute_resource_num(); i++) {
- if (ctx->cs_shader->resources[i].enabled) {
+ if (resources[i].enabled) {
int j;
COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
- for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) {
- if (ctx->cs_shader->resources[i].do_reloc[j]) {
- assert(ctx->cs_shader->resources[i].bo);
+ for (j = 0; j < resources[i].cs_end; j++) {
+ if (resources[i].do_reloc[j]) {
+ assert(resources[i].bo);
evergreen_emit_ctx_reloc(ctx,
- ctx->cs_shader->resources[i].bo,
- ctx->cs_shader->resources[i].usage);
+ resources[i].bo,
+ resources[i].usage);
}
- cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j];
+ cs->buf[cs->cdw++] = resources[i].cs[j];
}
- if (ctx->cs_shader->resources[i].bo) {
- onebo = ctx->cs_shader->resources[i].bo;
+ if (resources[i].bo) {
+ onebo = resources[i].bo;
evergreen_emit_ctx_reloc(ctx,
- ctx->cs_shader->resources[i].bo,
- ctx->cs_shader->resources[i].usage);
+ resources[i].bo,
+ resources[i].usage);
///special case for textures
- if (ctx->cs_shader->resources[i].do_reloc
- [ctx->cs_shader->resources[i].cs_end] == 2) {
+ if (resources[i].do_reloc
+ [resources[i].cs_end] == 2) {
evergreen_emit_ctx_reloc(ctx,
- ctx->cs_shader->resources[i].bo,
- ctx->cs_shader->resources[i].usage);
+ resources[i].bo,
+ resources[i].usage);
}
}
}
}
- /* r600_flush_framebuffer() updates the cb_flush_flags and then
- * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
- * a SURFACE_SYNC packet via r600_emit_surface_sync().
- *
- * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
- * 0xffffffff, so we will need to add a field to struct
- * r600_surface_sync_cmd if we want to manually set this value.
+ /* Emit dispatch state and dispatch packet */
+ evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
+
+ /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
*/
- r600_flush_framebuffer(ctx, true /* Flush now */);
+ ctx->flags |= R600_CONTEXT_CB_FLUSH;
+ r600_flush_emit(ctx);
#if 0
COMPUTE_DBG("cdw: %i\n", cs->cdw);
}
+
+/**
+ * Emit function for r600_cs_shader_state atom
+ */
+void evergreen_emit_cs_shader(
+ struct r600_context *rctx,
+ struct r600_atom *atom)
+{
+ struct r600_cs_shader_state *state =
+ (struct r600_cs_shader_state*)atom;
+ struct r600_pipe_compute *shader = state->shader;
+ struct radeon_winsys_cs *cs = rctx->cs;
+ uint64_t va;
+
+ va = r600_resource_va(&rctx->screen->screen, &shader->shader_code_bo->b.b);
+
+ r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
+ r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
+ r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
+ S_0288D4_NUM_GPRS(shader->bc.ngpr)
+ | S_0288D4_STACK_SIZE(shader->bc.nstack));
+ r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
+
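+ /* Emit a NOP relocation so the winsys adds shader_code_bo to the
+ * command stream's buffer list for the SQ_PGM_START_LS address above. */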
+ r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
+ r600_write_value(cs, r600_context_bo_reloc(rctx, shader->shader_code_bo,
+ RADEON_USAGE_READ));
+
+ rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
+}
+
static void evergreen_launch_grid(
struct pipe_context *ctx_,
const uint *block_layout, const uint *grid_layout,
uint32_t pc, const void *input)
{
- COMPUTE_DBG("PC: %i\n", pc);
-
struct r600_context *ctx = (struct r600_context *)ctx_;
- unsigned num_waves;
- unsigned num_pipes = ctx->screen->info.r600_max_pipes;
- unsigned wave_divisor = (16 * num_pipes);
- /* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */
- num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
- wave_divisor - 1) / wave_divisor;
-
- COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
- num_pipes, num_waves);
+ COMPUTE_DBG("PC: %i\n", pc);
- evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves);
evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
- evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
- compute_emit_cs(ctx);
+ compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
if (resources[i]->base.writable) {
assert(i+1 < 12);
- evergreen_set_rat(ctx->cs_shader, i+1,
+ evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
(struct r600_resource *)resources[i]->base.texture,
buffer->chunk->start_in_dw*4,
resources[i]->base.texture->width0);
evergreen_cs_set_vertex_buffer(ctx, vtx_id,
buffer->chunk->start_in_dw * 4,
resources[i]->base.texture);
- ctx->nr_cs_vertex_buffers = vtx_id + 1;
}
}
-
}
static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
assert(i+1 < 12);
///FETCH0 = VTX0 (param buffer),
//FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
- evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2);
+ evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
}
}
}
for (int i = 0; i < num_samplers; i++) {
if (samplers[i]) {
- evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i);
+ evergreen_set_sampler_resource(
+ ctx->cs_shader_state.shader, samplers[i], i);
}
}
}
*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
}
- evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4);
+ evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
evergreen_cs_set_vertex_buffer(ctx, 1, 0,
(struct pipe_resource*)pool->bo);
}
int num_threads;
int num_stack_entries;
- /* We aren't passing the EMIT_EARLY flag as the third argument
- * because we will be emitting this atom manually in order to
- * ensure it gets emitted after the start_cs_cmd atom.
+ /* Since all required registers are initialized in the
+ * start_compute_cs_cmd atom, we can EMIT_EARLY here.
*/
- r600_init_command_buffer(cb, 256, 0);
+ r600_init_command_buffer(ctx, cb, 1, 256);
cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
switch (ctx->family) {
}
/* Config Registers */
+ evergreen_init_common_regs(cb, ctx->chip_class,
+ ctx->family, ctx->screen->info.drm_minor);
+
+ /* The primitive type always needs to be POINTLIST for compute. */
+ r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
+ V_008958_DI_PT_POINTLIST);
+
if (ctx->chip_class < CAYMAN) {
/* These registers control which simds can be used by each stage.
/* We always use at least two vertex buffers for compute, one for
* parameters and one for global memory */
- ctx->nr_cs_vertex_buffers = 2;
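+ /* Bit 0: kernel parameters (vertex buffer 0), bit 1: the global
+ * memory pool (vertex buffer 1). */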
+ ctx->cs_vertex_buffer_state.enabled_mask =
+ ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
}