#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "util/u_blitter.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
#include "util/u_transfer.h"
#include "util/u_surface.h"
#include "util/u_pack_color.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
#include "evergreend.h"
-#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
-#ifdef HAVE_OPENCL
-#include "radeon_llvm_util.h"
-#endif
+#include "sb/sb_public.h"
+#include "radeon/radeon_elf_util.h"
+#include <inttypes.h>
/**
RAT0 is for global binding write
*/
-struct r600_resource* r600_compute_buffer_alloc_vram(
- struct r600_screen *screen,
- unsigned size)
+struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
+ unsigned size)
{
- struct pipe_resource * buffer = NULL;
+ struct pipe_resource *buffer = NULL;
assert(size);
- buffer = pipe_buffer_create(
- (struct pipe_screen*) screen,
- PIPE_BIND_CUSTOM,
- PIPE_USAGE_IMMUTABLE,
- size);
+ buffer = pipe_buffer_create((struct pipe_screen*) screen,
+ PIPE_BIND_CUSTOM,
+ PIPE_USAGE_IMMUTABLE,
+ size);
return (struct r600_resource *)buffer;
}
-static void evergreen_set_rat(
- struct r600_pipe_compute *pipe,
- int id,
- struct r600_resource* bo,
- int start,
- int size)
+static void evergreen_set_rat(struct r600_pipe_compute *pipe,
+ unsigned id,
+ struct r600_resource *bo,
+ int start,
+ int size)
{
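+ /* A RAT (random access target) lets the compute shader write to the
+ * buffer through the CB block, which is why the buffer is bound like a
+ * color surface below. */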
struct pipe_surface rat_templ;
struct r600_surface *surf = NULL;
rat_templ.u.tex.last_layer = 0;
/* Add the RAT to the list of color buffers */
- pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->context.create_surface(
+ pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
(struct pipe_context *)pipe->ctx,
(struct pipe_resource *)bo, &rat_templ);
evergreen_init_color_surface_rat(rctx, surf);
}
-static void evergreen_cs_set_vertex_buffer(
- struct r600_context * rctx,
- unsigned vb_index,
- unsigned offset,
- struct pipe_resource * buffer)
+static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx,
+ unsigned vb_index,
+ unsigned offset,
+ struct pipe_resource *buffer)
{
struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
struct pipe_vertex_buffer *vb = &state->vb[vb_index];
/* The vertex instructions in the compute shaders use the texture cache,
* so we need to invalidate it. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
state->enabled_mask |= 1 << vb_index;
state->dirty_mask |= 1 << vb_index;
- state->atom.dirty = true;
+ r600_mark_atom_dirty(rctx, &state->atom);
}
-static void evergreen_cs_set_constant_buffer(
- struct r600_context * rctx,
- unsigned cb_index,
- unsigned offset,
- unsigned size,
- struct pipe_resource * buffer)
+static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
+ unsigned cb_index,
+ unsigned offset,
+ unsigned size,
+ struct pipe_resource *buffer)
{
struct pipe_constant_buffer cb;
cb.buffer_size = size;
cb.buffer = buffer;
cb.user_buffer = NULL;
- rctx->context.set_constant_buffer(&rctx->context, PIPE_SHADER_COMPUTE, cb_index, &cb);
+ rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
}
-static const struct u_resource_vtbl r600_global_buffer_vtbl =
+/* We need to define these R600 registers here, because we can't include
+ * both evergreend.h and r600d.h.
+ */
+#define R_028868_SQ_PGM_RESOURCES_VS 0x028868
+#define R_028850_SQ_PGM_RESOURCES_PS 0x028850
+
+#ifdef HAVE_OPENCL
+
+static void r600_shader_binary_read_config(const struct radeon_shader_binary *binary,
+ struct r600_bytecode *bc,
+ uint64_t symbol_offset,
+ boolean *use_kill)
{
- u_default_resource_get_handle, /* get_handle */
- r600_compute_global_buffer_destroy, /* resource_destroy */
- r600_compute_global_transfer_map, /* transfer_map */
- r600_compute_global_transfer_flush_region,/* transfer_flush_region */
- r600_compute_global_transfer_unmap, /* transfer_unmap */
- r600_compute_global_transfer_inline_write /* transfer_inline_write */
-};
+ unsigned i;
+ const unsigned char *config =
+ radeon_shader_binary_config_start(binary, symbol_offset);
+
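+ /* Each 8-byte config record is a little-endian register offset followed
+ * by the value the compiler assigned to that register. */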
+ for (i = 0; i < binary->config_size_per_symbol; i += 8) {
+ unsigned reg =
+ util_le32_to_cpu(*(uint32_t*)(config + i));
+ unsigned value =
+ util_le32_to_cpu(*(uint32_t*)(config + i + 4));
+ switch (reg) {
+ /* R600 / R700 */
+ case R_028850_SQ_PGM_RESOURCES_PS:
+ case R_028868_SQ_PGM_RESOURCES_VS:
+ /* Evergreen / Northern Islands */
+ case R_028844_SQ_PGM_RESOURCES_PS:
+ case R_028860_SQ_PGM_RESOURCES_VS:
+ case R_0288D4_SQ_PGM_RESOURCES_LS:
+ bc->ngpr = MAX2(bc->ngpr, G_028844_NUM_GPRS(value));
+ bc->nstack = MAX2(bc->nstack, G_028844_STACK_SIZE(value));
+ break;
+ case R_02880C_DB_SHADER_CONTROL:
+ *use_kill = G_02880C_KILL_ENABLE(value);
+ break;
+ case R_0288E8_SQ_LDS_ALLOC:
+ bc->nlds_dw = value;
+ break;
+ }
+ }
+}
+static unsigned r600_create_shader(struct r600_bytecode *bc,
+ const struct radeon_shader_binary *binary,
+ boolean *use_kill)
-void *evergreen_create_compute_state(
- struct pipe_context *ctx_,
- const const struct pipe_compute_state *cso)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
+ assert(binary->code_size % 4 == 0);
+ bc->bytecode = CALLOC(1, binary->code_size);
+ memcpy(bc->bytecode, binary->code, binary->code_size);
+ bc->ndw = binary->code_size / 4;
-#ifdef HAVE_OPENCL
- const struct pipe_llvm_program_header * header;
- const unsigned char * code;
- unsigned i;
+ r600_shader_binary_read_config(binary, bc, 0, use_kill);
+ return 0;
+}
- COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
+#endif
+
+static void r600_destroy_shader(struct r600_bytecode *bc)
+{
+ FREE(bc->bytecode);
+}
+
+static void *evergreen_create_compute_state(struct pipe_context *ctx,
+ const struct pipe_compute_state *cso)
+{
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
+#ifdef HAVE_OPENCL
+ const struct pipe_llvm_program_header *header;
+ const char *code;
+ void *p;
+ boolean use_kill;
+ COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
header = cso->prog;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
+ radeon_shader_binary_init(&shader->binary);
+ radeon_elf_read(code, header->num_bytes, &shader->binary);
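+ /* r600_create_shader() copies the final ISA out of the ELF and fills in
+ * ngpr/nstack from the binary's embedded config records. */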
+ r600_create_shader(&shader->bc, &shader->binary, &use_kill);
+
+ /* Upload code + ROdata */
+ shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
+ shader->bc.ndw * 4);
+ p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+ //TODO: use util_memcpy_cpu_to_le32 ?
+ memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
+ rctx->b.ws->buffer_unmap(shader->code_bo->buf);
#endif
- shader->ctx = (struct r600_context*)ctx;
- shader->local_size = cso->req_local_mem; ///TODO: assert it
+ shader->ctx = rctx;
+ shader->local_size = cso->req_local_mem;
shader->private_size = cso->req_private_mem;
shader->input_size = cso->req_input_mem;
-#ifdef HAVE_OPENCL
- shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
- shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
-
- for (i = 0; i < shader->num_kernels; i++) {
- struct r600_kernel *kernel = &shader->kernels[i];
- kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
- header->num_bytes);
- }
-#endif
return shader;
}
-void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
+static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
{
- struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_pipe_compute *shader = state;
+
+ COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n");
- free(shader);
+ if (!shader)
+ return;
+
+ radeon_shader_binary_clean(&shader->binary);
+ r600_destroy_shader(&shader->bc);
+
+ /* TODO destroy shader->code_bo, shader->const_bo
+ * we'll need something like r600_buffer_free */
+ FREE(shader);
}
-static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
+static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
+ struct r600_context *rctx = (struct r600_context *)ctx;
- COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
+ COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n");
- ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
+ rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
/* The kernel parameters are stored in a vtx buffer (ID=0). Besides the explicit
- * kernel parameters there are inplicit parameters that need to be stored
+ * kernel parameters there are implicit parameters that need to be stored
* in the vertex buffer as well. Here is how these parameters are organized in
* the buffer:
*
* DWORDS 0-2: Number of work groups in each dimension (x,y,z)
* DWORDS 3-5: Number of global work items in each dimension (x,y,z)
* DWORDS 6-8: Number of work items within each work group in each
*             dimension (x,y,z)
* DWORDS 9+ : Kernel parameters
*/
-void evergreen_compute_upload_input(
- struct pipe_context *ctx_,
- const uint *block_layout,
- const uint *grid_layout,
- const void *input)
+static void evergreen_compute_upload_input(struct pipe_context *ctx,
+ const struct pipe_grid_info *info)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
- int i;
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+ unsigned i;
/* We need to reserve 9 dwords (36 bytes) for implicit kernel
* parameters.
*/
unsigned input_size = shader->input_size + 36;
- uint32_t * num_work_groups_start;
- uint32_t * global_size_start;
- uint32_t * local_size_start;
- uint32_t * kernel_parameters_start;
+ uint32_t *num_work_groups_start;
+ uint32_t *global_size_start;
+ uint32_t *local_size_start;
+ uint32_t *kernel_parameters_start;
+ struct pipe_box box;
+ struct pipe_transfer *transfer = NULL;
if (shader->input_size == 0) {
return;
}

if (!shader->kernel_param) {
/* Add space for the grid dimensions */
- shader->kernel_param = r600_compute_buffer_alloc_vram(
- ctx->screen, input_size);
+ shader->kernel_param = (struct r600_resource *)
+ pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_IMMUTABLE, input_size);
}
- num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
+ u_box_1d(0, input_size, &box);
+ num_work_groups_start = ctx->transfer_map(ctx,
+ (struct pipe_resource*)shader->kernel_param,
+ 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+ &box, &transfer);
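+ /* The three cursors below each advance 3 dwords, matching the layout in
+ * the comment above: work-group count at dword 0, global size at dword 3,
+ * local size at dword 6, kernel arguments from dword 9 on. */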
global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
/* Copy the work group size */
- memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
+ memcpy(num_work_groups_start, info->grid, 3 * sizeof(uint));
/* Copy the global size */
for (i = 0; i < 3; i++) {
- global_size_start[i] = grid_layout[i] * block_layout[i];
+ global_size_start[i] = info->grid[i] * info->block[i];
}
/* Copy the local dimensions */
- memcpy(local_size_start, block_layout, 3 * sizeof(uint));
+ memcpy(local_size_start, info->block, 3 * sizeof(uint));
/* Copy the kernel inputs */
- memcpy(kernel_parameters_start, input, shader->input_size);
+ memcpy(kernel_parameters_start, info->input, shader->input_size);
for (i = 0; i < (input_size / 4); i++) {
- COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
+ COMPUTE_DBG(rctx->screen, "input %i : %u\n", i,
((unsigned*)num_work_groups_start)[i]);
}
- ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
+ ctx->transfer_unmap(ctx, transfer);
/* ID=0 is reserved for the parameters */
- evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
+ evergreen_cs_set_constant_buffer(rctx, 0, 0, input_size,
(struct pipe_resource*)shader->kernel_param);
}
-static void evergreen_emit_direct_dispatch(
- struct r600_context *rctx,
- const uint *block_layout, const uint *grid_layout)
+static void evergreen_emit_dispatch(struct r600_context *rctx,
+ const struct pipe_grid_info *info)
{
int i;
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
unsigned num_waves;
- unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+ unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
unsigned wave_divisor = (16 * num_pipes);
int group_size = 1;
int grid_size = 1;
- /* XXX: Enable lds and get size from cs_shader_state */
- unsigned lds_size = 0;
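+ /* local_size (req_local_mem) is in bytes while SQ_LDS_ALLOC counts dwords,
+ * hence the division by 4; nlds_dw is the compiler-reported LDS usage read
+ * from the binary's config section. */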
+ unsigned lds_size = shader->local_size / 4 +
+ shader->bc.nlds_dw;
+
/* Calculate group_size/grid_size */
for (i = 0; i < 3; i++) {
- group_size *= block_layout[i];
+ group_size *= info->block[i];
}
for (i = 0; i < 3; i++) {
- grid_size *= grid_layout[i];
+ grid_size *= info->grid[i];
}
/* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
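+ /* For example, a 16x16x1 thread block on an 8-pipe part yields
+ * ceil(256 / 128) = 2 wavefronts. */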
- num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
+ num_waves = (info->block[0] * info->block[1] * info->block[2] +
wave_divisor - 1) / wave_divisor;
- COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
- num_pipes, num_waves);
+ COMPUTE_DBG(rctx->screen, "Using %u pipes, "
+ "%u wavefronts per thread block, "
+ "allocating %u dwords lds.\n",
+ num_pipes, num_waves, lds_size);
- /* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
- * on Evergreen) oes to Pixel Shaders and half goes to Compute Shaders.
- * We may need to allocat the entire LDS space for Compute Shaders.
- *
- * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
- * CM: CM_R_0286FC_SPI_LDS_MGMT := S_0286FC_NUM_LS_LDS(lds_dwords)
- */
-
- r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
+ radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
- r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
- r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
- r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
- r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+ radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+ radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+ radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+ radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
- r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+ radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
group_size);
- r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
- r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
- r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
- r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+ radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+ radeon_emit(cs, info->block[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+ radeon_emit(cs, info->block[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+ radeon_emit(cs, info->block[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+
+ if (rctx->b.chip_class < CAYMAN) {
+ assert(lds_size <= 8192);
+ } else {
+ /* Cayman appears to have a slightly smaller limit, see the
+ * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
+ assert(lds_size <= 8160);
+ }
- r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+ radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC,
lds_size | (num_waves << 14));
/* Dispatch packet */
- r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
- r600_write_value(cs, grid_layout[0]);
- r600_write_value(cs, grid_layout[1]);
- r600_write_value(cs, grid_layout[2]);
+ radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+ radeon_emit(cs, info->grid[0]);
+ radeon_emit(cs, info->grid[1]);
+ radeon_emit(cs, info->grid[2]);
/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
- r600_write_value(cs, 1);
+ radeon_emit(cs, 1);
}
-static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
- const uint *grid_layout)
+static void compute_emit_cs(struct r600_context *rctx,
+ const struct pipe_grid_info *info)
{
- struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
- unsigned flush_flags = 0;
- int i;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ unsigned i;
/* Make sure that the gfx ring is the only one active. */
- if (ctx->rings.dma.cs) {
- ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+ if (radeon_emitted(rctx->b.dma.cs, 0)) {
+ rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
}
/* Initialize all the compute-related registers.
* See evergreen_init_atom_start_compute_cs() in this file for the list
* of registers initialized by the start_compute_cs_cmd atom.
*/
- r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
+ r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
+
+ /* emit config state */
+ if (rctx->b.chip_class == EVERGREEN)
+ r600_emit_atom(rctx, &rctx->config_state.atom);
- ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
- r600_flush_emit(ctx);
+ rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ r600_flush_emit(rctx);
/* Emit colorbuffers. */
- for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
- struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
- unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
+ /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
+ for (i = 0; i < 8 && i < rctx->framebuffer.state.nr_cbufs; i++) {
+ struct r600_surface *cb = (struct r600_surface*)rctx->framebuffer.state.cbufs[i];
+ unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
(struct r600_resource*)cb->base.texture,
- RADEON_USAGE_READWRITE);
-
- r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
- r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
- r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
- r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
- r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
- r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
- r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
- r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
-
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
- r600_write_value(cs, reloc);
-
- if (!ctx->keep_tiling_flags) {
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
- r600_write_value(cs, reloc);
- }
-
- r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
- r600_write_value(cs, reloc);
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SHADER_RW_BUFFER);
+
+ radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
+ radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */
+ radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */
+ radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */
+ radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */
+ radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */
+ radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */
+ radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
+ radeon_emit(cs, reloc);
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
+ radeon_emit(cs, reloc);
}
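+ /* Mark the remaining colorbuffers invalid; CB0-7 registers are spaced
+ * 0x3C apart, while CB8-11 only expose an INFO register every 0x1C. */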
+ for (; i < 8 ; i++)
+ radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+ S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+ for (; i < 12; i++)
+ radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
+ S_028C70_FORMAT(V_028C70_COLOR_INVALID));
/* Set CB_TARGET_MASK XXX: Use cb_misc_state */
- r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
- ctx->compute_cb_target_mask);
+ radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
+ rctx->compute_cb_target_mask);
/* Emit vertex buffer state */
- ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
- r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
+ rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask);
+ r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom);
/* Emit constant buffer state */
- r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
+ r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
+
+ /* Emit sampler state */
+ r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom);
+
+ /* Emit sampler view (texture resource) state */
+ r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom);
/* Emit compute shader state */
- r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
+ r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
/* Emit dispatch state and dispatch packet */
- evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
+ evergreen_emit_dispatch(rctx, info);
/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff. */
- ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
- r600_flush_emit(ctx);
+ rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE;
+ r600_flush_emit(rctx);
+ rctx->b.flags = 0;
+
+ if (rctx->b.chip_class >= CAYMAN) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ /* DEALLOC_STATE prevents the GPU from hanging when a
+ * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
+ * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
+ */
+ radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
+ radeon_emit(cs, 0);
+ }
#if 0
- COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
+ COMPUTE_DBG(rctx->screen, "cdw: %i\n", cs->cdw);
for (i = 0; i < cs->cdw; i++) {
- COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, ctx->cs->buf[i]);
+ COMPUTE_DBG(rctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
}
#endif
- flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
- if (ctx->keep_tiling_flags) {
- flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
- }
-
- ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
-
- ctx->flags = 0;
-
- COMPUTE_DBG(ctx->screen, "shader started\n");
}
/**
* Emit function for r600_cs_shader_state atom
*/
-void evergreen_emit_cs_shader(
- struct r600_context *rctx,
- struct r600_atom *atom)
+void evergreen_emit_cs_shader(struct r600_context *rctx,
+ struct r600_atom *atom)
{
struct r600_cs_shader_state *state =
(struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
- struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
- struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
uint64_t va;
-
- va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
-
- r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
- r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
- r600_write_value(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
- S_0288D4_NUM_GPRS(kernel->bc.ngpr)
- | S_0288D4_STACK_SIZE(kernel->bc.nstack));
- r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
-
- r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
- kernel->code_bo, RADEON_USAGE_READ));
-
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ struct r600_resource *code_bo;
+ unsigned ngpr, nstack;
+
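+ /* state->pc is the selected kernel's byte offset into the uploaded
+ * binary, so SQ_PGM_START_LS ends up pointing at that kernel's entry. */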
+ code_bo = shader->code_bo;
+ va = shader->code_bo->gpu_address + state->pc;
+ ngpr = shader->bc.ngpr;
+ nstack = shader->bc.nstack;
+
+ radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
+ radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
+ radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */
+ S_0288D4_NUM_GPRS(ngpr)
+ | S_0288D4_STACK_SIZE(nstack));
+ radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
+
+ radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
+ radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+ code_bo, RADEON_USAGE_READ,
+ RADEON_PRIO_USER_SHADER));
}
-static void evergreen_launch_grid(
- struct pipe_context *ctx_,
- const uint *block_layout, const uint *grid_layout,
- uint32_t pc, const void *input)
+static void evergreen_launch_grid(struct pipe_context *ctx,
+ const struct pipe_grid_info *info)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
-
-#ifdef HAVE_OPENCL
- COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
-
- struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
- if (!shader->kernels[pc].code_bo) {
- void *p;
- struct r600_kernel *kernel = &shader->kernels[pc];
- r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
- kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
- kernel->bc.ndw * 4);
- p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
- memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
- ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
- }
+ struct r600_context *rctx = (struct r600_context *)ctx;
+#ifdef HAVE_OPENCL
+ struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+ boolean use_kill;
+
+ rctx->cs_shader_state.pc = info->pc;
+ /* Get the config information for this kernel. */
+ r600_shader_binary_read_config(&shader->binary, &shader->bc,
+ info->pc, &use_kill);
#endif
- ctx->cs_shader_state.kernel_index = pc;
- evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
- compute_emit_cs(ctx, block_layout, grid_layout);
+ COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc);
+
+ evergreen_compute_upload_input(ctx, info);
+ compute_emit_cs(rctx, info);
}
-static void evergreen_set_compute_resources(struct pipe_context * ctx_,
- unsigned start, unsigned count,
- struct pipe_surface ** surfaces)
+static void evergreen_set_compute_resources(struct pipe_context *ctx,
+ unsigned start, unsigned count,
+ struct pipe_surface **surfaces)
{
- struct r600_context *ctx = (struct r600_context *)ctx_;
+ struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_surface **resources = (struct r600_surface **)surfaces;
- COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
+ COMPUTE_DBG(rctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
start, count);
- for (int i = 0; i < count; i++) {
- /* The First two vertex buffers are reserved for parameters and
+ for (unsigned i = 0; i < count; i++) {
+ /* The first three vertex buffers are reserved for parameters and
* global buffers. */
- unsigned vtx_id = 2 + i;
+ unsigned vtx_id = 3 + i;
if (resources[i]) {
struct r600_resource_global *buffer =
(struct r600_resource_global*)
resources[i]->base.texture;

if (resources[i]->base.writable) {
assert(i+1 < 12);
- evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
+ evergreen_set_rat(rctx->cs_shader_state.shader, i+1,
(struct r600_resource *)resources[i]->base.texture,
buffer->chunk->start_in_dw*4,
resources[i]->base.texture->width0);
}
- evergreen_cs_set_vertex_buffer(ctx, vtx_id,
+ evergreen_cs_set_vertex_buffer(rctx, vtx_id,
buffer->chunk->start_in_dw * 4,
resources[i]->base.texture);
}
}
}
-static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
- unsigned start_slot, unsigned count,
- struct pipe_sampler_view **views)
+static void evergreen_set_global_binding(struct pipe_context *ctx,
+ unsigned first, unsigned n,
+ struct pipe_resource **resources,
+ uint32_t **handles)
{
- struct r600_pipe_sampler_view **resource =
- (struct r600_pipe_sampler_view **)views;
-
- for (int i = 0; i < count; i++) {
- if (resource[i]) {
- assert(i+1 < 12);
- /* XXX: Implement */
- assert(!"Compute samplers not implemented.");
- ///FETCH0 = VTX0 (param buffer),
- //FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
- }
- }
-}
-
-static void evergreen_bind_compute_sampler_states(
- struct pipe_context *ctx_,
- unsigned start_slot,
- unsigned num_samplers,
- void **samplers_)
-{
- struct compute_sampler_state ** samplers =
- (struct compute_sampler_state **)samplers_;
-
- for (int i = 0; i < num_samplers; i++) {
- if (samplers[i]) {
- /* XXX: Implement */
- assert(!"Compute samplers not implemented.");
- }
- }
-}
-
-static void evergreen_set_global_binding(
- struct pipe_context *ctx_, unsigned first, unsigned n,
- struct pipe_resource **resources,
- uint32_t **handles)
-{
- struct r600_context *ctx = (struct r600_context *)ctx_;
- struct compute_memory_pool *pool = ctx->screen->global_pool;
+ struct r600_context *rctx = (struct r600_context *)ctx;
+ struct compute_memory_pool *pool = rctx->screen->global_pool;
struct r600_resource_global **buffers =
(struct r600_resource_global **)resources;
+ unsigned i;
- COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
+ COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
first, n);
if (!resources) {
return;
}
- compute_memory_finalize_pending(pool, ctx_);
+ /* We mark these items for promotion to the pool if they
+ * aren't already there */
+ for (i = first; i < first + n; i++) {
+ struct compute_memory_item *item = buffers[i]->chunk;
+
+ if (!is_item_in_pool(item))
+ buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
+ }
+
+ if (compute_memory_finalize_pending(pool, ctx) == -1) {
+ /* XXX: Unset */
+ return;
+ }
- for (int i = 0; i < n; i++)
+ for (i = first; i < first + n; i++)
{
+ uint32_t buffer_offset;
+ uint32_t handle;
assert(resources[i]->target == PIPE_BUFFER);
assert(resources[i]->bind & PIPE_BIND_GLOBAL);
- *(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
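+ /* The handle returned to the application is the buffer's byte offset
+ * within the global pool; it stays little-endian so the kernel can use
+ * it directly. */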
+ buffer_offset = util_le32_to_cpu(*(handles[i]));
+ handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;
+
+ *(handles[i]) = util_cpu_to_le32(handle);
}
- evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
- evergreen_cs_set_vertex_buffer(ctx, 1, 0,
+ /* globals for writing */
+ evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
+ /* globals for reading */
+ evergreen_cs_set_vertex_buffer(rctx, 1, 0,
(struct pipe_resource*)pool->bo);
+
+ /* constants for reading, LLVM puts them in text segment */
+ evergreen_cs_set_vertex_buffer(rctx, 2, 0,
+ (struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
}
/**
* This function initializes all the compute specific registers that need to
* be initialized for each compute command stream. Registers that are common
* to both compute and 3D will be initialized at the beginning of each compute
* command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG
* packet requires that the shader type bit be set, we must initialize all
* context registers needed for compute in this function. The registers
- * intialized by the start_cs_cmd atom can be found in evereen_state.c in the
+ * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
* functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
* on the GPU family.
*/
-void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
+void evergreen_init_atom_start_compute_cs(struct r600_context *rctx)
{
- struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
+ struct r600_command_buffer *cb = &rctx->start_compute_cs_cmd;
int num_threads;
int num_stack_entries;
- /* since all required registers are initialised in the
+ /* since all required registers are initialized in the
* start_compute_cs_cmd atom, we can EMIT_EARLY here.
*/
r600_init_command_buffer(cb, 256);
r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
- switch (ctx->family) {
+ switch (rctx->b.family) {
case CHIP_CEDAR:
default:
num_threads = 128;
}
/* Config Registers */
- if (ctx->chip_class < CAYMAN)
- evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
- ctx->screen->info.drm_minor);
+ if (rctx->b.chip_class < CAYMAN)
+ evergreen_init_common_regs(rctx, cb, rctx->b.chip_class, rctx->b.family,
+ rctx->screen->b.info.drm_minor);
else
- cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
- ctx->screen->info.drm_minor);
+ cayman_init_common_regs(cb, rctx->b.chip_class, rctx->b.family,
+ rctx->screen->b.info.drm_minor);
/* The primitive type always needs to be POINTLIST for compute. */
r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
V_008958_DI_PT_POINTLIST);
- if (ctx->chip_class < CAYMAN) {
+ if (rctx->b.chip_class < CAYMAN) {
/* These registers control which simds can be used by each stage.
* The default for these registers is 0xffffffff, which means
* all simds are available for each stage:
* R_008E20_SQ_STATIC_THREAD_MGMT1
* R_008E24_SQ_STATIC_THREAD_MGMT2
* R_008E28_SQ_STATIC_THREAD_MGMT3
*/
- /* XXX: We may need to adjust the thread and stack resouce
+ /* XXX: We may need to adjust the thread and stack resource
* values for 3D/compute interop */
r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
r600_store_value(cb,
S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
}
+ /* Give the compute shader all the available LDS space.
+ * NOTE: This only sets the maximum number of dwords that a compute
+ * shader can allocate. When a shader is executed, we still need to
+ * allocate the appropriate amount of LDS dwords using the
+ * CM_R_0288E8_SQ_LDS_ALLOC register.
+ */
+ if (rctx->b.chip_class < CAYMAN) {
+ r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
+ S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
+ } else {
+ r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
+ S_0286FC_NUM_PS_LDS(0) |
+ S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
+ }
/* Context Registers */
- if (ctx->chip_class < CAYMAN) {
+ if (rctx->b.chip_class < CAYMAN) {
/* workaround for hw issues with dyn gpr - must set all limits
* to 240 instead of 0, 0x1e == 240 / 8
*/
eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
}
-void evergreen_init_compute_state_functions(struct r600_context *ctx)
+void evergreen_init_compute_state_functions(struct r600_context *rctx)
{
- ctx->context.create_compute_state = evergreen_create_compute_state;
- ctx->context.delete_compute_state = evergreen_delete_compute_state;
- ctx->context.bind_compute_state = evergreen_bind_compute_state;
-// ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
- ctx->context.set_compute_resources = evergreen_set_compute_resources;
- ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
- ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
- ctx->context.set_global_binding = evergreen_set_global_binding;
- ctx->context.launch_grid = evergreen_launch_grid;
-
- /* We always use at least one vertex buffer for parameters (id = 1)*/
- ctx->cs_vertex_buffer_state.enabled_mask =
- ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
-}
+ rctx->b.b.create_compute_state = evergreen_create_compute_state;
+ rctx->b.b.delete_compute_state = evergreen_delete_compute_state;
+ rctx->b.b.bind_compute_state = evergreen_bind_compute_state;
+// rctx->b.b.create_sampler_view = evergreen_compute_create_sampler_view;
+ rctx->b.b.set_compute_resources = evergreen_set_compute_resources;
+ rctx->b.b.set_global_binding = evergreen_set_global_binding;
+ rctx->b.b.launch_grid = evergreen_launch_grid;
+}
-struct pipe_resource *r600_compute_global_buffer_create(
- struct pipe_screen *screen,
- const struct pipe_resource *templ)
+static void *r600_compute_global_transfer_map(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **ptransfer)
{
- struct r600_resource_global* result = NULL;
- struct r600_screen* rscreen = NULL;
- int size_in_dw = 0;
+ struct r600_context *rctx = (struct r600_context*)ctx;
+ struct compute_memory_pool *pool = rctx->screen->global_pool;
+ struct r600_resource_global* buffer =
+ (struct r600_resource_global*)resource;
- assert(templ->target == PIPE_BUFFER);
- assert(templ->bind & PIPE_BIND_GLOBAL);
- assert(templ->array_size == 1 || templ->array_size == 0);
- assert(templ->depth0 == 1 || templ->depth0 == 0);
- assert(templ->height0 == 1 || templ->height0 == 0);
+ struct compute_memory_item *item = buffer->chunk;
+ struct pipe_resource *dst = NULL;
+ unsigned offset = box->x;
- result = (struct r600_resource_global*)
- CALLOC(sizeof(struct r600_resource_global), 1);
- rscreen = (struct r600_screen*)screen;
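+ /* Mapping needs the item backed by its own buffer: demote it out of the
+ * pool, or allocate a standalone buffer if it never had one. */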
+ if (is_item_in_pool(item)) {
+ compute_memory_demote_item(pool, item, ctx);
+ }
+ else {
+ if (item->real_buffer == NULL) {
+ item->real_buffer =
+ r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
+ }
+ }
- COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
- COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
- templ->array_size);
+ dst = (struct pipe_resource*)item->real_buffer;
- result->base.b.vtbl = &r600_global_buffer_vtbl;
- result->base.b.b.screen = screen;
- result->base.b.b = *templ;
- pipe_reference_init(&result->base.b.b.reference, 1);
+ if (usage & PIPE_TRANSFER_READ)
+ buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
- size_in_dw = (templ->width0+3) / 4;
+ COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
+ "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
+ "width = %u, height = %u, depth = %u)\n", level, usage,
+ box->x, box->y, box->z, box->width, box->height,
+ box->depth);
+ COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
+ "%u (box.x)\n", item->id, box->x);
- result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
- if (result->chunk == NULL)
- {
- free(result);
- return NULL;
- }
+ assert(resource->target == PIPE_BUFFER);
+ assert(resource->bind & PIPE_BIND_GLOBAL);
+ assert(box->x >= 0);
+ assert(box->y == 0);
+ assert(box->z == 0);
- return &result->base.b.b;
+ ///TODO: do it better, mapping is not possible if the pool is too big
+ return pipe_buffer_map_range(ctx, dst,
+ offset, box->width, usage, ptransfer);
+}
+
+static void r600_compute_global_transfer_unmap(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
+{
+ /* struct r600_resource_global are not real resources, they just map
+ * to an offset within the compute memory pool. The function
+ * r600_compute_global_transfer_map() maps the memory pool
+ * resource rather than the struct r600_resource_global passed to
+ * it as an argument and then initializes ptransfer->resource with
+ * the memory pool resource (via pipe_buffer_map_range).
+ * When transfer_unmap is called it uses the memory pool's
+ * vtable, which calls r600_buffer_transfer_unmap() rather than
+ * this function.
+ */
+ assert (!"This function should not be called");
+}
+
+static void r600_compute_global_transfer_flush_region(struct pipe_context *ctx,
+ struct pipe_transfer *transfer,
+ const struct pipe_box *box)
+{
+ assert(0 && "TODO");
+}
+
+static void r600_compute_global_transfer_inline_write(struct pipe_context *pipe,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ const void *data,
+ unsigned stride,
+ unsigned layer_stride)
+{
+ assert(0 && "TODO");
}
-void r600_compute_global_buffer_destroy(
- struct pipe_screen *screen,
- struct pipe_resource *res)
+static void r600_compute_global_buffer_destroy(struct pipe_screen *screen,
+ struct pipe_resource *res)
{
struct r600_resource_global* buffer = NULL;
struct r600_screen* rscreen = NULL;

buffer = (struct r600_resource_global*)res;
rscreen = (struct r600_screen*)screen;

compute_memory_free(rscreen->global_pool, buffer->chunk->id);

buffer->chunk = NULL;
free(res);
}
-void *r600_compute_global_transfer_map(
- struct pipe_context *ctx_,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- struct pipe_transfer **ptransfer)
+static const struct u_resource_vtbl r600_global_buffer_vtbl =
{
- struct r600_context *rctx = (struct r600_context*)ctx_;
- struct compute_memory_pool *pool = rctx->screen->global_pool;
- struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
- struct r600_resource_global* buffer =
- (struct r600_resource_global*)resource;
- uint32_t* map;
+ u_default_resource_get_handle, /* get_handle */
+ r600_compute_global_buffer_destroy, /* resource_destroy */
+ r600_compute_global_transfer_map, /* transfer_map */
+ r600_compute_global_transfer_flush_region,/* transfer_flush_region */
+ r600_compute_global_transfer_unmap, /* transfer_unmap */
+ r600_compute_global_transfer_inline_write /* transfer_inline_write */
+};
- compute_memory_finalize_pending(pool, ctx_);
+struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ)
+{
+ struct r600_resource_global* result = NULL;
+ struct r600_screen* rscreen = NULL;
+ int size_in_dw = 0;
- assert(resource->target == PIPE_BUFFER);
+ assert(templ->target == PIPE_BUFFER);
+ assert(templ->bind & PIPE_BIND_GLOBAL);
+ assert(templ->array_size == 1 || templ->array_size == 0);
+ assert(templ->depth0 == 1 || templ->depth0 == 0);
+ assert(templ->height0 == 1 || templ->height0 == 0);
- COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
- "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
- "width = %u, height = %u, depth = %u)\n", level, usage,
- box->x, box->y, box->z, box->width, box->height,
- box->depth);
+ result = (struct r600_resource_global*)
+ CALLOC(sizeof(struct r600_resource_global), 1);
+ rscreen = (struct r600_screen*)screen;
- transfer->resource = resource;
- transfer->level = level;
- transfer->usage = usage;
- transfer->box = *box;
- transfer->stride = 0;
- transfer->layer_stride = 0;
+ COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+ COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
+ templ->array_size);
- assert(transfer->resource->target == PIPE_BUFFER);
- assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
- assert(transfer->box.x >= 0);
- assert(transfer->box.y == 0);
- assert(transfer->box.z == 0);
+ result->base.b.vtbl = &r600_global_buffer_vtbl;
+ result->base.b.b = *templ;
+ result->base.b.b.screen = screen;
+ pipe_reference_init(&result->base.b.b.reference, 1);
- ///TODO: do it better, mapping is not possible if the pool is too big
+ size_in_dw = (templ->width0+3) / 4;
- COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
+ result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
- if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
- util_slab_free(&rctx->pool_transfers, transfer);
+ if (result->chunk == NULL)
+ {
+ free(result);
return NULL;
}
- *ptransfer = transfer;
-
- COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
- "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
- return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
-}
-
-void r600_compute_global_transfer_unmap(
- struct pipe_context *ctx_,
- struct pipe_transfer* transfer)
-{
- struct r600_context *ctx = NULL;
- struct r600_resource_global* buffer = NULL;
-
- assert(transfer->resource->target == PIPE_BUFFER);
- assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
-
- ctx = (struct r600_context *)ctx_;
- buffer = (struct r600_resource_global*)transfer->resource;
-
- COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
-
- ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
- util_slab_free(&ctx->pool_transfers, transfer);
-}
-
-void r600_compute_global_transfer_flush_region(
- struct pipe_context *ctx_,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- assert(0 && "TODO");
-}
-
-void r600_compute_global_transfer_inline_write(
- struct pipe_context *pipe,
- struct pipe_resource *resource,
- unsigned level,
- unsigned usage,
- const struct pipe_box *box,
- const void *data,
- unsigned stride,
- unsigned layer_stride)
-{
- assert(0 && "TODO");
+ return &result->base.b.b;
}