vb->buffer = buffer;
vb->user_buffer = NULL;
- r600_inval_vertex_cache(rctx);
+ /* The vertex instructions in the compute shaders use the texture cache,
+ * so we need to invalidate it. */
+ rctx->flags |= R600_CONTEXT_TEX_FLUSH;
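+ /* The flag is consumed by the next r600_flush_emit() call, so the
+ * actual SURFACE_SYNC is deferred rather than emitted here. */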
state->enabled_mask |= 1 << vb_index;
state->dirty_mask |= 1 << vb_index;
- r600_atom_dirty(rctx, &state->atom);
+ state->atom.dirty = true;
}
-const struct u_resource_vtbl r600_global_buffer_vtbl =
+static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
r600_compute_global_buffer_destroy, /* resource_destroy */
- r600_compute_global_get_transfer, /* get_transfer */
- r600_compute_global_transfer_destroy, /* transfer_destroy */
r600_compute_global_transfer_map, /* transfer_map */
r600_compute_global_transfer_flush_region,/* transfer_flush_region */
r600_compute_global_transfer_unmap, /* transfer_unmap */
{
struct r600_context *ctx = (struct r600_context *)ctx_;
struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
- void *p;
#ifdef HAVE_OPENCL
const struct pipe_llvm_program_header * header;
const unsigned char * code;
+ unsigned i;
COMPUTE_DBG("*** evergreen_create_compute_state\n");
shader->input_size = cso->req_input_mem;
#ifdef HAVE_OPENCL
- shader->mod = llvm_parse_bitcode(code, header->num_bytes);
+ shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
+ shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
- r600_compute_shader_create(ctx_, shader->mod, &shader->bc);
+ for (i = 0; i < shader->num_kernels; i++) {
+ struct r600_kernel *kernel = &shader->kernels[i];
+ kernel->llvm_module = llvm_get_kernel_module(i, code,
+ header->num_bytes);
+ }
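+
+ /* Only the LLVM module is extracted per kernel here; the bytecode and
+ * code BO are generated on demand in evergreen_launch_grid(). */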
#endif
- shader->shader_code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
- shader->bc.ndw * 4);
-
- p = ctx->ws->buffer_map(shader->shader_code_bo->cs_buf, ctx->cs,
- PIPE_TRANSFER_WRITE);
-
- memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
- ctx->ws->buffer_unmap(shader->shader_code_bo->cs_buf);
return shader;
}
shader->input_size, 0);
}
-void evergreen_direct_dispatch(
- struct pipe_context *ctx_,
+static void evergreen_emit_direct_dispatch(
+ struct r600_context *rctx,
const uint *block_layout, const uint *grid_layout)
{
- /* This struct r600_context* must be called rctx, because the
- * r600_pipe_state_add_reg macro assumes there is a local variable
- * of type struct r600_context* called rctx.
- */
- struct r600_context *rctx = (struct r600_context *)ctx_;
- struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
-
int i;
-
- struct evergreen_compute_resource* res = get_empty_res(shader,
- COMPUTE_RESOURCE_DISPATCH, 0);
-
- /* Set CB_TARGET_MASK */
- evergreen_reg_set(res, R_028238_CB_TARGET_MASK, rctx->compute_cb_target_mask);
-
- evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST);
-
- evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0);
- evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0);
- evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0);
-
- evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]);
- evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]);
- evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]);
-
+ struct radeon_winsys_cs *cs = rctx->cs;
+ unsigned num_waves;
+ unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+ unsigned wave_divisor = (16 * num_pipes);
int group_size = 1;
-
int grid_size = 1;
+ /* XXX: Enable lds and get size from cs_shader_state */
+ unsigned lds_size = 0;
+ /* Calculate group_size/grid_size */
for (i = 0; i < 3; i++) {
group_size *= block_layout[i];
}

for (i = 0; i < 3; i++) {
grid_size *= grid_layout[i];
}
- evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size);
- evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size);
+ /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
+ num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
+ wave_divisor - 1) / wave_divisor;
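+ /* For example (hypothetical numbers): an 8x8x8 block on a 2-pipe chip
+ * gives ceil(512 / 32) = 16 wavefronts per thread block. */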
+
+ COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
+ num_pipes, num_waves);
+
+ /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
+ * on Evergreen) goes to Pixel Shaders and half goes to Compute Shaders.
+ * We may need to allocate the entire LDS space for Compute Shaders.
+ *
+ * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
+ * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
+ */
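+ /* A rough, untested sketch of claiming the whole LDS for compute,
+ * using the register names from the note above:
+ *
+ * if (rctx->chip_class < CAYMAN)
+ * r600_write_config_reg(cs, R_008E2C_SQ_LDS_RESOURCE_MGMT,
+ * S_008E2C_NUM_LS_LDS(lds_dwords));
+ * else
+ * r600_write_compute_context_reg(cs, CM_R_0286FC_SPI_LDS_MGMT,
+ * S_0286FC_NUM_LS_LDS(lds_dwords));
+ */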
+
+ r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
+
+ r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+ r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+ r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+ r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+
+ r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+ group_size);
- evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
- evergreen_emit_raw_value(res, grid_layout[0]);
- evergreen_emit_raw_value(res, grid_layout[1]);
- evergreen_emit_raw_value(res, grid_layout[2]);
- ///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN
- evergreen_emit_raw_value(res, 1);
+ r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+ r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+ r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+ r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+
+ r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+ lds_size | (num_waves << 14));
+
+ /* Dispatch packet */
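+ /* A PKT3 count of 3 means four payload dwords follow the header:
+ * the three grid dimensions plus the dispatch initiator. */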
+ r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+ r600_write_value(cs, grid_layout[0]);
+ r600_write_value(cs, grid_layout[1]);
+ r600_write_value(cs, grid_layout[2]);
+ /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
+ r600_write_value(cs, 1);
}
-static void compute_emit_cs(struct r600_context *ctx)
+static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
+ const uint *grid_layout)
{
struct radeon_winsys_cs *cs = ctx->cs;
+ unsigned flush_flags = 0;
int i;
struct r600_resource *onebo = NULL;
- struct r600_pipe_state *cb_state;
struct evergreen_compute_resource *resources =
ctx->cs_shader_state.shader->resources;
- /* Initialize all the registers common to both 3D and compute. Some
- * 3D only register will be initialized by this atom as well, but
- * this is OK for now.
- *
- * See evergreen_init_atom_start_cs() or cayman_init_atom_start_cs() in
- * evergreen_state.c for the list of registers that are intialized by
- * the start_cs_cmd atom.
- */
- r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);
-
- /* Initialize all the compute specific registers.
+ /* Initialize all the compute-related registers.
*
* See evergreen_init_atom_start_compute_cs() in this file for the list
- * of registers initialized by the start_compuet_cs_cmd atom.
+ * of registers initialized by the start_compute_cs_cmd atom.
*/
- r600_emit_atom(ctx, &ctx->start_compute_cs_cmd.atom);
+ r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);
+
+ ctx->flags |= R600_CONTEXT_CB_FLUSH;
+ r600_flush_emit(ctx);
+
+ /* Emit colorbuffers. */
+ for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
+ struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
+ unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
+ RADEON_USAGE_READWRITE);
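+ /* r600_context_bo_reloc() adds the BO to the CS buffer list and
+ * returns a relocation index; the PKT3_NOP packets below hand that
+ * index to the winsys so the addresses written here can be patched. */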
+
+ r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
+ r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
+ r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
+ r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
+ r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
+ r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
+ r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
+ r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
+
+ r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
+ r600_write_value(cs, reloc);
+
+ if (!ctx->keep_tiling_flags) {
+ r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
+ r600_write_value(cs, reloc);
+ }
+
+ r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
+ r600_write_value(cs, reloc);
+ }
+
+ /* Set CB_TARGET_MASK XXX: Use cb_misc_state */
+ r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
+ ctx->compute_cb_target_mask);
- /* Emit cb_state */
- cb_state = ctx->states[R600_PIPE_STATE_FRAMEBUFFER];
- r600_context_pipe_state_emit(ctx, cb_state, RADEON_CP_PACKET3_COMPUTE_MODE);
/* Emit vertex buffer state */
ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
}
}
- /* r600_flush_framebuffer() updates the cb_flush_flags and then
- * calls r600_emit_atom() on the ctx->surface_sync_cmd.atom, which emits
- * a SURFACE_SYNC packet via r600_emit_surface_sync().
- *
- * XXX r600_emit_surface_sync() hardcodes the CP_COHER_SIZE to
- * 0xffffffff, so we will need to add a field to struct
- * r600_surface_sync_cmd if we want to manually set this value.
+ /* Emit dispatch state and dispatch packet */
+ evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
+
+ /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
*/
- r600_flush_framebuffer(ctx, true /* Flush now */);
+ ctx->flags |= R600_CONTEXT_CB_FLUSH;
+ r600_flush_emit(ctx);
#if 0
COMPUTE_DBG("cdw: %i\n", cs->cdw);
}
#endif
- ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE);
+ flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
+ if (ctx->keep_tiling_flags) {
+ flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
+ }
+
+ ctx->ws->cs_flush(ctx->cs, flush_flags);
ctx->pm4_dirty_cdwords = 0;
ctx->flags = 0;
struct r600_cs_shader_state *state =
(struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
+ struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
struct radeon_winsys_cs *cs = rctx->cs;
uint64_t va;
- va = r600_resource_va(&rctx->screen->screen, &shader->shader_code_bo->b.b);
+ va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
- S_0288D4_NUM_GPRS(shader->bc.ngpr)
- | S_0288D4_STACK_SIZE(shader->bc.nstack));
+ S_0288D4_NUM_GPRS(kernel->bc.ngpr)
+ | S_0288D4_STACK_SIZE(kernel->bc.nstack));
r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, shader->shader_code_bo,
+ r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
RADEON_USAGE_READ));
- r600_inval_shader_cache(rctx);
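+ /* This replaces the immediate r600_inval_shader_cache() call: the flag
+ * defers the cache invalidate to the next r600_flush_emit(). */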
+ rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
}
static void evergreen_launch_grid(
struct pipe_context *ctx_,
const uint *block_layout, const uint *grid_layout,
uint32_t pc, const void *input)
{
- COMPUTE_DBG("PC: %i\n", pc);
-
struct r600_context *ctx = (struct r600_context *)ctx_;
- unsigned num_waves;
- unsigned num_pipes = ctx->screen->info.r600_max_pipes;
- unsigned wave_divisor = (16 * num_pipes);
- /* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */
- num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
- wave_divisor - 1) / wave_divisor;
+#ifdef HAVE_OPENCL
+ COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
- COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
- num_pipes, num_waves);
+ struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
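+
+ /* Compile the kernel and upload its bytecode on first launch; later
+ * launches with the same pc reuse the cached code_bo. */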
+ if (!shader->kernels[pc].code_bo) {
+ void *p;
+ struct r600_kernel *kernel = &shader->kernels[pc];
+ r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
+ kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
+ kernel->bc.ndw * 4);
+ p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
+ PIPE_TRANSFER_WRITE);
+ memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
+ ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
+ }
+#endif
- evergreen_set_lds(ctx->cs_shader_state.shader, 0, 0, num_waves);
+ ctx->cs_shader_state.kernel_index = pc;
evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
- evergreen_direct_dispatch(ctx_, block_layout, grid_layout);
- compute_emit_cs(ctx);
+ compute_emit_cs(ctx, block_layout, grid_layout);
}
static void evergreen_set_compute_resources(struct pipe_context * ctx_,
int num_threads;
int num_stack_entries;
- /* We aren't passing the EMIT_EARLY flag as the third argument
- * because we will be emitting this atom manually in order to
- * ensure it gets emitted after the start_cs_cmd atom.
+ /* Since all required registers are initialized in the
+ * start_compute_cs_cmd atom, we can EMIT_EARLY here.
*/
- r600_init_command_buffer(cb, 256, 0);
+ r600_init_command_buffer(cb, 256);
cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
+ /* This must be first. */
+ r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
+ r600_store_value(cb, 0x80000000);
+ r600_store_value(cb, 0x80000000);
+
+ /* We're setting config registers here. */
+ r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+
switch (ctx->family) {
case CHIP_CEDAR:
default:
}
/* Config Registers */
+ evergreen_init_common_regs(cb, ctx->chip_class,
+ ctx->family, ctx->screen->info.drm_minor);
+
+ /* The primitive type always needs to be POINTLIST for compute. */
+ r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
+ V_008958_DI_PT_POINTLIST);
+
if (ctx->chip_class < CAYMAN) {
/* These registers control which simds can be used by each stage.
free(res);
}
-void* r600_compute_global_transfer_map(
+void *r600_compute_global_transfer_map(
struct pipe_context *ctx_,
- struct pipe_transfer* transfer)
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
+ struct r600_context *rctx = (struct r600_context*)ctx_;
+ struct compute_memory_pool *pool = rctx->screen->global_pool;
+ struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
+ struct r600_resource_global* buffer =
+ (struct r600_resource_global*)resource;
+ uint32_t* map;
+
+ compute_memory_finalize_pending(pool, ctx_);
+
+ assert(resource->target == PIPE_BUFFER);
+
+ COMPUTE_DBG("* r600_compute_global_transfer_map()\n"
+ "level = %u, usage = %u, box(x = %u, y = %u, z = %u, "
+ "width = %u, height = %u, depth = %u)\n", level, usage,
+ box->x, box->y, box->z, box->width, box->height,
+ box->depth);
+
+ transfer->resource = resource;
+ transfer->level = level;
+ transfer->usage = usage;
+ transfer->box = *box;
+ transfer->stride = 0;
+ transfer->layer_stride = 0;
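+
+ /* Note: zero strides are fine for buffers, though not for 2D or
+ * higher textures. */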
+
assert(transfer->resource->target == PIPE_BUFFER);
assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
assert(transfer->box.x >= 0);
assert(transfer->box.y == 0);
assert(transfer->box.z == 0);
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct r600_resource_global* buffer =
- (struct r600_resource_global*)transfer->resource;
-
- uint32_t* map;
/* TODO: do this better; mapping is not possible if the pool is too big. */
- if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
- ctx->cs, transfer->usage))) {
+ if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
+ rctx->cs, transfer->usage))) {
+ util_slab_free(&rctx->pool_transfers, transfer);
return NULL;
}
- COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw);
+ *ptransfer = transfer;
+
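+ /* map is a uint32_t*, so adding start_in_dw below advances in dwords;
+ * the cast to char* makes the final box.x offset a byte offset. */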
+ COMPUTE_DBG("Buffer: %p + %lli (buffer offset in global memory) "
+ "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
struct r600_resource_global* buffer =
(struct r600_resource_global*)transfer->resource;
- ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
-}
+ COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
-struct pipe_transfer * r600_compute_global_get_transfer(
- struct pipe_context *ctx_,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box)
-{
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct compute_memory_pool *pool = ctx->screen->global_pool;
-
- compute_memory_finalize_pending(pool, ctx_);
-
- assert(resource->target == PIPE_BUFFER);
- struct r600_context *rctx = (struct r600_context*)ctx_;
- struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
-
- transfer->resource = resource;
- transfer->level = level;
- transfer->usage = usage;
- transfer->box = *box;
- transfer->stride = 0;
- transfer->layer_stride = 0;
- transfer->data = NULL;
-
- /* Note strides are zero, this is ok for buffers, but not for
- * textures 2d & higher at least.
- */
- return transfer;
-}
-
-void r600_compute_global_transfer_destroy(
- struct pipe_context *ctx_,
- struct pipe_transfer *transfer)
-{
- struct r600_context *rctx = (struct r600_context*)ctx_;
- util_slab_free(&rctx->pool_transfers, transfer);
+ ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
+ util_slab_free(&ctx->pool_transfers, transfer);
}
void r600_compute_global_transfer_flush_region(