#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
-#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
-#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
-#include "llvm_wrapper.h"
+#include "radeon_llvm_util.h"
#endif
/**
*/
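+/* Allocate a driver-internal (PIPE_BIND_CUSTOM) buffer for compute data;
+ * the immutable usage hint lets the driver place it in VRAM. */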
+struct r600_resource* r600_compute_buffer_alloc_vram(
+ struct r600_screen *screen,
+ unsigned size)
+{
+ struct pipe_resource * buffer = NULL;
+ assert(size);
+
+ buffer = pipe_buffer_create(
+ (struct pipe_screen*) screen,
+ PIPE_BIND_CUSTOM,
+ PIPE_USAGE_IMMUTABLE,
+ size);
+
+ return (struct r600_resource *)buffer;
+}
+
+
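+/* Bind a buffer as a RAT (Random Access Target). RATs are accessed
+ * through the color buffer hardware, so the buffer is added to the
+ * framebuffer state as color buffer 'id'. */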
+static void evergreen_set_rat(
+ struct r600_pipe_compute *pipe,
+ int id,
+ struct r600_resource* bo,
+ int start,
+ int size)
+{
+ struct pipe_surface rat_templ;
+ struct r600_surface *surf = NULL;
+ struct r600_context *rctx = NULL;
+
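+	/* Evergreen provides 12 RATs; the start offset must be 256-byte
+	 * aligned and the size a multiple of 4 bytes. */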
+ assert(id < 12);
+ assert((size & 3) == 0);
+ assert((start & 0xFF) == 0);
+
+ rctx = pipe->ctx;
+
+	COMPUTE_DBG(rctx->screen, "bind rat: %i\n", id);
+
+ /* Create the RAT surface */
+ memset(&rat_templ, 0, sizeof(rat_templ));
+ rat_templ.format = PIPE_FORMAT_R32_UINT;
+ rat_templ.u.tex.level = 0;
+ rat_templ.u.tex.first_layer = 0;
+ rat_templ.u.tex.last_layer = 0;
+
+	/* Add the RAT to the list of color buffers */
+ pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->context.create_surface(
+ (struct pipe_context *)pipe->ctx,
+ (struct pipe_resource *)bo, &rat_templ);
+
+ /* Update the number of color buffers */
+ pipe->ctx->framebuffer.state.nr_cbufs =
+ MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);
+
+ /* Update the cb_target_mask
+ * XXX: I think this is a potential spot for bugs once we start doing
+ * GL interop. cb_target_mask may be modified in the 3D sections
+ * of this driver. */
+ pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
+
+ surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
+ evergreen_init_color_surface_rat(rctx, surf);
+}
+
static void evergreen_cs_set_vertex_buffer(
struct r600_context * rctx,
unsigned vb_index,
state->atom.dirty = true;
}
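+/* Bind 'buffer' to the given constant buffer slot for the compute
+ * shader stage. */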
+static void evergreen_cs_set_constant_buffer(
+ struct r600_context * rctx,
+ unsigned cb_index,
+ unsigned offset,
+ unsigned size,
+ struct pipe_resource * buffer)
+{
+ struct pipe_constant_buffer cb;
+ cb.buffer_size = size;
+ cb.buffer_offset = offset;
+ cb.buffer = buffer;
+ cb.user_buffer = NULL;
+
+ rctx->context.set_constant_buffer(&rctx->context, PIPE_SHADER_COMPUTE, cb_index, &cb);
+}
+
static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
u_default_resource_get_handle, /* get_handle */
#endif
shader->ctx = (struct r600_context*)ctx;
- shader->resources = (struct evergreen_compute_resource*)
- CALLOC(sizeof(struct evergreen_compute_resource),
- get_compute_resource_num());
shader->local_size = cso->req_local_mem; ///TODO: assert it
shader->private_size = cso->req_private_mem;
shader->input_size = cso->req_input_mem;
#ifdef HAVE_OPENCL
- shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
+ shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
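+	/* Extract one LLVM module per kernel from the bitcode blob so
+	 * each kernel can be compiled separately. */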
for (i = 0; i < shader->num_kernels; i++) {
struct r600_kernel *kernel = &shader->kernels[i];
- kernel->llvm_module = llvm_get_kernel_module(i, code,
+ kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
header->num_bytes);
}
#endif
{
struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
- free(shader->resources);
free(shader);
}
struct r600_context *ctx = (struct r600_context *)ctx_;
struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
int i;
- unsigned kernel_parameters_offset_bytes = 36;
+ /* We need to reserve 9 dwords (36 bytes) for implicit kernel
+ * parameters.
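+	 * They are laid out as three uvec3s: the number of work groups,
+	 * the global work size, and the local work size (see the *_start
+	 * pointers below).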
+ */
+ unsigned input_size = shader->input_size + 36;
uint32_t * num_work_groups_start;
uint32_t * global_size_start;
uint32_t * local_size_start;
uint32_t * kernel_parameters_start;
+ struct pipe_box box;
+ struct pipe_transfer *transfer = NULL;
if (shader->input_size == 0) {
return;
}
if (!shader->kernel_param) {
- unsigned buffer_size = shader->input_size;
-
/* Add space for the grid dimensions */
- buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
- shader->kernel_param = r600_compute_buffer_alloc_vram(
- ctx->screen, buffer_size);
+ shader->kernel_param = (struct r600_resource *)
+ pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_IMMUTABLE, input_size);
}
- num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
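+	/* Map the entire parameter buffer for writing. DISCARD_RANGE tells
+	 * the driver the old contents are not needed, so it can avoid
+	 * stalling on a previous dispatch that may still be reading them. */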
+ u_box_1d(0, input_size, &box);
+ num_work_groups_start = ctx_->transfer_map(ctx_,
+ (struct pipe_resource*)shader->kernel_param,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ &box, &transfer);
global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
/* Copy the kernel inputs */
memcpy(kernel_parameters_start, input, shader->input_size);
- for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
- (shader->input_size / 4); i++) {
+ for (i = 0; i < (input_size / 4); i++) {
COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
((unsigned*)num_work_groups_start)[i]);
}
- ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
+ ctx_->transfer_unmap(ctx_, transfer);
- ///ID=0 is reserved for the parameters
- evergreen_cs_set_vertex_buffer(ctx, 0, 0,
+ /* ID=0 is reserved for the parameters */
+ evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
(struct pipe_resource*)shader->kernel_param);
- ///ID=0 is reserved for parameters
- evergreen_set_const_cache(shader, 0, shader->kernel_param,
- shader->input_size, 0);
}
static void evergreen_emit_direct_dispatch(
struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
unsigned flush_flags = 0;
int i;
- struct r600_resource *onebo = NULL;
- struct evergreen_compute_resource *resources =
- ctx->cs_shader_state.shader->resources;
	/* make sure the gfx ring is the only ring active */
if (ctx->rings.dma.cs) {
ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
+ /* Emit constant buffer state */
+ r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
+
/* Emit compute shader state */
r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
- for (i = 0; i < get_compute_resource_num(); i++) {
- if (resources[i].enabled) {
- int j;
- COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
-
- for (j = 0; j < resources[i].cs_end; j++) {
- if (resources[i].do_reloc[j]) {
- assert(resources[i].bo);
- evergreen_emit_ctx_reloc(ctx,
- resources[i].bo,
- resources[i].usage);
- }
-
- cs->buf[cs->cdw++] = resources[i].cs[j];
- }
-
- if (resources[i].bo) {
- onebo = resources[i].bo;
- evergreen_emit_ctx_reloc(ctx,
- resources[i].bo,
- resources[i].usage);
-
- ///special case for textures
- if (resources[i].do_reloc
- [resources[i].cs_end] == 2) {
- evergreen_emit_ctx_reloc(ctx,
- resources[i].bo,
- resources[i].usage);
- }
- }
- }
- }
-
/* Emit dispatch state and dispatch packet */
evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
#if 0
COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
for (i = 0; i < cs->cdw; i++) {
- COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, ctx->cs->buf[i]);
+ COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
}
#endif
flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
}
- ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
+ ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
- ctx->pm4_dirty_cdwords = 0;
ctx->flags = 0;
COMPUTE_DBG(ctx->screen, "shader started\n");
-
- ctx->ws->buffer_wait(onebo->buf, 0);
-
- COMPUTE_DBG(ctx->screen, "...\n");
}
unsigned start_slot, unsigned count,
struct pipe_sampler_view **views)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
struct r600_pipe_sampler_view **resource =
(struct r600_pipe_sampler_view **)views;
for (int i = 0; i < count; i++) {
if (resource[i]) {
assert(i+1 < 12);
+ /* XXX: Implement */
+ assert(!"Compute samplers not implemented.");
///FETCH0 = VTX0 (param buffer),
//FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
- evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
}
}
}
unsigned num_samplers,
void **samplers_)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
struct compute_sampler_state ** samplers =
(struct compute_sampler_state **)samplers_;
for (int i = 0; i < num_samplers; i++) {
if (samplers[i]) {
- evergreen_set_sampler_resource(
- ctx->cs_shader_state.shader, samplers[i], i);
+ /* XXX: Implement */
+ assert(!"Compute samplers not implemented.");
}
}
}
ctx->context.set_global_binding = evergreen_set_global_binding;
ctx->context.launch_grid = evergreen_launch_grid;
- /* We always use at least two vertex buffers for compute, one for
- * parameters and one for global memory */
+	/* We always use at least one vertex buffer (id = 1), for the
+	 * global memory pool */
ctx->cs_vertex_buffer_state.enabled_mask =
- ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
+ ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
}
{
struct r600_context *rctx = (struct r600_context*)ctx_;
struct compute_memory_pool *pool = rctx->screen->global_pool;
- struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
struct r600_resource_global* buffer =
(struct r600_resource_global*)resource;
- uint32_t* map;
- compute_memory_finalize_pending(pool, ctx_);
-
- assert(resource->target == PIPE_BUFFER);
-
- COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
+ COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
"width = %u, height = %u, depth = %u)\n", level, usage,
box->x, box->y, box->z, box->width, box->height,
box->depth);
+ COMPUTE_DBG(rctx->screen, "Buffer: %u (buffer offset in global memory) "
+ "+ %u (box.x)\n", buffer->chunk->start_in_dw, box->x);
- transfer->resource = resource;
- transfer->level = level;
- transfer->usage = usage;
- transfer->box = *box;
- transfer->stride = 0;
- transfer->layer_stride = 0;
-
- assert(transfer->resource->target == PIPE_BUFFER);
- assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
- assert(transfer->box.x >= 0);
- assert(transfer->box.y == 0);
- assert(transfer->box.z == 0);
-
- ///TODO: do it better, mapping is not possible if the pool is too big
- COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
-
- if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
- util_slab_free(&rctx->pool_transfers, transfer);
- return NULL;
- }
+ compute_memory_finalize_pending(pool, ctx_);
- *ptransfer = transfer;
+ assert(resource->target == PIPE_BUFFER);
+ assert(resource->bind & PIPE_BIND_GLOBAL);
+ assert(box->x >= 0);
+ assert(box->y == 0);
+ assert(box->z == 0);
- COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
- "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
- return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
+ ///TODO: do it better, mapping is not possible if the pool is too big
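+	/* chunk->start_in_dw is in dwords; convert it to bytes before
+	 * adding the byte offset from the transfer box. */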
+ return pipe_buffer_map_range(ctx_, (struct pipe_resource*)buffer->chunk->pool->bo,
+ box->x + (buffer->chunk->start_in_dw * 4),
+ box->width, usage, ptransfer);
}
void r600_compute_global_transfer_unmap(
struct pipe_context *ctx_,
struct pipe_transfer* transfer)
{
- struct r600_context *ctx = NULL;
- struct r600_resource_global* buffer = NULL;
-
- assert(transfer->resource->target == PIPE_BUFFER);
- assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
-
- ctx = (struct r600_context *)ctx_;
- buffer = (struct r600_resource_global*)transfer->resource;
-
- COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
-
- ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
- util_slab_free(&ctx->pool_transfers, transfer);
+ /* struct r600_resource_global are not real resources, they just map
+ * to an offset within the compute memory pool. The function
+ * r600_compute_global_transfer_map() maps the memory pool
+ * resource rather than the struct r600_resource_global passed to
+	 * it as an argument and then initializes ptransfer->resource with
+ * the memory pool resource (via pipe_buffer_map_range).
+	 * When transfer_unmap is called, it uses the memory pool's
+	 * vtable, which calls r600_buffer_transfer_unmap() rather than
+	 * this function.
+ */
+	assert(!"This function should not be called");
}
void r600_compute_global_transfer_flush_region(