#include "util/u_inlines.h"
#include "util/u_framebuffer.h"
#include "pipebuffer/pb_buffer.h"
-#include "r600.h"
#include "evergreend.h"
#include "r600_resource.h"
#include "r600_shader.h"
#include "r600_pipe.h"
#include "r600_formats.h"
#include "evergreen_compute.h"
-#include "r600_hw_context_priv.h"
#include "evergreen_compute_internal.h"
#include "compute_memory_pool.h"
#ifdef HAVE_OPENCL
-#include "llvm_wrapper.h"
+#include "radeon_llvm_util.h"
#endif
/* The vertex instructions in the compute shaders use the texture cache,
* so we need to invalidate it. */
- rctx->flags |= R600_CONTEXT_GPU_FLUSH;
+ rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
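/* Setting the dirty bits makes the vertex buffer atom re-emit this
 * buffer on the next dispatch. */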
state->enabled_mask |= 1 << vb_index;
state->dirty_mask |= 1 << vb_index;
state->atom.dirty = true;
const unsigned char * code;
unsigned i;
- COMPUTE_DBG("*** evergreen_create_compute_state\n");
+ COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
header = cso->prog;
code = cso->prog + sizeof(struct pipe_llvm_program_header);
shader->input_size = cso->req_input_mem;
#ifdef HAVE_OPENCL
- shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
+ shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
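/* Split the LLVM module into one sub-module per kernel; each one is
 * compiled lazily in evergreen_launch_grid() the first time it runs. */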
for (i = 0; i < shader->num_kernels; i++) {
struct r600_kernel *kernel = &shader->kernels[i];
- kernel->llvm_module = llvm_get_kernel_module(i, code,
+ kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
header->num_bytes);
}
#endif
{
struct r600_context *ctx = (struct r600_context *)ctx_;
- COMPUTE_DBG("*** evergreen_bind_compute_state\n");
+ COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
}
unsigned buffer_size = shader->input_size;
/* Add space for the grid dimensions */
- buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
+ buffer_size += kernel_parameters_offset_bytes;
shader->kernel_param = r600_compute_buffer_alloc_vram(
ctx->screen, buffer_size);
}
- num_work_groups_start = ctx->ws->buffer_map(
- shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
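/* The mapped input buffer starts with three dwords each for the
 * work-group count, the global size and the local size, followed by
 * the kernel parameters themselves. */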
global_size_start = num_work_groups_start + (3 * (sizeof(uint) / 4));
local_size_start = global_size_start + (3 * (sizeof(uint) / 4));
kernel_parameters_start = local_size_start + (3 * (sizeof(uint) / 4));
for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
(shader->input_size / 4); i++) {
- COMPUTE_DBG("input %i : %i\n", i,
+ COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
((unsigned*)num_work_groups_start)[i]);
}
const uint *block_layout, const uint *grid_layout)
{
int i;
- struct radeon_winsys_cs *cs = rctx->cs;
+ struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
unsigned num_waves;
unsigned num_pipes = rctx->screen->info.r600_max_pipes;
unsigned wave_divisor = (16 * num_pipes);
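/* num_waves = ceil(threads_per_block / (16 * num_pipes)) */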
num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
wave_divisor - 1) / wave_divisor;
- COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
+ COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
num_pipes, num_waves);
/* XXX: Partition the LDS between PS/CS. By default half (4096 dwords
static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
const uint *grid_layout)
{
- struct radeon_winsys_cs *cs = ctx->cs;
+ struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
unsigned flush_flags = 0;
int i;
-
struct r600_resource *onebo = NULL;
struct evergreen_compute_resource *resources =
ctx->cs_shader_state.shader->resources;
+ /* make sure that the gfx ring is the only one active */
+ if (ctx->rings.dma.cs) {
+ ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+ }
+
/* Initialize all the compute-related registers.
*
* See evergreen_init_atom_start_compute_cs() in this file for the list
* of registers initialized by the start_compute_cs_cmd atom.
*/
- r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);
+ r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
- ctx->flags |= R600_CONTEXT_WAIT_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+ ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
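/* r600_flush_emit() turns the flags set above into the actual
 * wait-idle and cache-flush packets. */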
r600_flush_emit(ctx);
/* Emit colorbuffers. */
for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
- unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
+ unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
+ (struct r600_resource*)cb->base.texture,
RADEON_USAGE_READWRITE);
r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
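/* Each color buffer programs seven context registers starting at
 * CB_COLOR0_BASE; on evergreen, compute global buffers are accessed
 * through these CB/RAT slots, hence the relocation above. */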
for (i = 0; i < get_compute_resource_num(); i++) {
if (resources[i].enabled) {
int j;
- COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
+ COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
for (j = 0; j < resources[i].cs_end; j++) {
if (resources[i].do_reloc[j]) {
/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
*/
- ctx->flags |= R600_CONTEXT_GPU_FLUSH;
+ ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
r600_flush_emit(ctx);
#if 0
- COMPUTE_DBG("cdw: %i\n", cs->cdw);
+ COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
for (i = 0; i < cs->cdw; i++) {
- COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
+ COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
}
#endif
flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
}
- ctx->ws->cs_flush(ctx->cs, flush_flags);
+ ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
- ctx->pm4_dirty_cdwords = 0;
ctx->flags = 0;
- COMPUTE_DBG("shader started\n");
+ COMPUTE_DBG(ctx->screen, "shader started\n");
ctx->ws->buffer_wait(onebo->buf, 0);
- COMPUTE_DBG("...\n");
-
- ctx->streamout_start = TRUE;
- ctx->streamout_append_bitmask = ~0;
-
+ COMPUTE_DBG(ctx->screen, "...\n");
}
struct r600_cs_shader_state *state = (struct r600_cs_shader_state*)atom;
struct r600_pipe_compute *shader = state->shader;
struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
- struct radeon_winsys_cs *cs = rctx->cs;
+ struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
uint64_t va;
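/* GPU virtual address of the kernel code; compute kernels run through
 * the LS program registers (see SQ_PGM_RESOURCES_LS_2 below). */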
va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
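/* The NOP packet carries the relocation index for the code BO so the
 * kernel driver can patch in its actual GPU address at submission. */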
r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
- r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
- RADEON_USAGE_READ));
+ r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
+ kernel->code_bo, RADEON_USAGE_READ));
- rctx->flags |= R600_CONTEXT_GPU_FLUSH;
+ rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
}
static void evergreen_launch_grid(
struct r600_context *ctx = (struct r600_context *)ctx_;
#ifdef HAVE_OPENCL
- COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
+ COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
if (!shader->kernels[pc].code_bo) {
r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
kernel->bc.ndw * 4);
- p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
- PIPE_TRANSFER_WRITE);
+ p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
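/* The compiled bytecode now lives in code_bo; later launches of this
 * kernel skip compilation and go straight to dispatch. */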
}
struct r600_context *ctx = (struct r600_context *)ctx_;
struct r600_surface **resources = (struct r600_surface **)surfaces;
- COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
+ COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
start, count);
for (int i = 0; i < count; i++) {
struct r600_resource_global **buffers =
(struct r600_resource_global **)resources;
- COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
+ COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
first, n);
if (!resources) {
CALLOC(sizeof(struct r600_resource_global), 1);
rscreen = (struct r600_screen*)screen;
- COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
- COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
+ COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+ COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
templ->array_size);
result->base.b.vtbl = &r600_global_buffer_vtbl;
assert(resource->target == PIPE_BUFFER);
- COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
+ COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
"width = %u, height = %u, depth = %u)\n", level, usage,
box->x, box->y, box->z, box->width, box->height,
/* TODO: handle this better; mapping is not possible if the pool is too big */
- COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
+ COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
- if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
- rctx->cs, transfer->usage))) {
+ if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
util_slab_free(&rctx->pool_transfers, transfer);
return NULL;
}
*ptransfer = transfer;
- COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
+ COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
}
ctx = (struct r600_context *)ctx_;
buffer = (struct r600_resource_global*)transfer->resource;
- COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
+ COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
util_slab_free(&ctx->pool_transfers, transfer);