r600g/compute: Fix input buffer size calculation
[mesa.git] src/gallium/drivers/r600/evergreen_compute.c
index ce17d3a61ece49e31134cd7d3bbcd7bee0aefe31..58dc36d1d2a77b66e34022c1fb56e1dbefc67f14 100644
 #include "util/u_inlines.h"
 #include "util/u_framebuffer.h"
 #include "pipebuffer/pb_buffer.h"
-#include "r600.h"
 #include "evergreend.h"
 #include "r600_resource.h"
 #include "r600_shader.h"
 #include "r600_pipe.h"
 #include "r600_formats.h"
 #include "evergreen_compute.h"
-#include "r600_hw_context_priv.h"
 #include "evergreen_compute_internal.h"
 #include "compute_memory_pool.h"
 #ifdef HAVE_OPENCL
-#include "llvm_wrapper.h"
+#include "radeon_llvm_util.h"
 #endif
 
 /**
@@ -98,7 +96,7 @@ static void evergreen_cs_set_vertex_buffer(
 
        /* The vertex instructions in the compute shaders use the texture cache,
         * so we need to invalidate it. */
-       rctx->flags |= R600_CONTEXT_TEX_FLUSH;
+       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
        state->enabled_mask |= 1 << vb_index;
        state->dirty_mask |= 1 << vb_index;
        state->atom.dirty = true;
@@ -127,7 +125,7 @@ void *evergreen_create_compute_state(
        const unsigned char * code;
        unsigned i;
 
-       COMPUTE_DBG("*** evergreen_create_compute_state\n");
+       COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
 
        header = cso->prog;
        code = cso->prog + sizeof(struct pipe_llvm_program_header);
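
COMPUTE_DBG now takes the screen as its first argument so the message can be gated on a per-screen debug flag instead of being compiled in unconditionally. A minimal sketch of the shape such a macro could take; the field and flag names here are assumptions, not the driver's actual definitions:

    #define COMPUTE_DBG(rscreen, fmt, args...)                        \
            do {                                                      \
                    if ((rscreen)->debug_flags & DBG_COMPUTE)         \
                            fprintf(stderr, fmt, ##args);             \
            } while (0)
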
@@ -142,12 +140,12 @@ void *evergreen_create_compute_state(
        shader->input_size = cso->req_input_mem;
 
 #ifdef HAVE_OPENCL 
-       shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
+       shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
        shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
 
        for (i = 0; i < shader->num_kernels; i++) {
                struct r600_kernel *kernel = &shader->kernels[i];
-               kernel->llvm_module = llvm_get_kernel_module(i, code,
+               kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
                                                        header->num_bytes);
        }
 #endif
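
The code pointer set up above skips past the pipe_llvm_program_header that prefixes the serialized LLVM bitcode, and header->num_bytes is taken as the bitcode length handed to the radeon_llvm_* helpers. A sketch of the assumed layout of the blob passed in pipe_compute_state::prog:

    /* Assumed: a small header followed immediately by LLVM bitcode. */
    struct pipe_llvm_program_header {
            uint32_t num_bytes;   /* size of the bitcode that follows */
    };
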
@@ -166,7 +164,7 @@ static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
 {
        struct r600_context *ctx = (struct r600_context *)ctx_;
 
-       COMPUTE_DBG("*** evergreen_bind_compute_state\n");
+       COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
 
        ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
 }
@@ -205,13 +203,12 @@ void evergreen_compute_upload_input(
                unsigned buffer_size = shader->input_size;
 
                /* Add space for the grid dimensions */
-               buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
+               buffer_size += kernel_parameters_offset_bytes;
                shader->kernel_param = r600_compute_buffer_alloc_vram(
                                                ctx->screen, buffer_size);
        }
 
-       num_work_groups_start = ctx->ws->buffer_map(
-               shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
+       num_work_groups_start = r600_buffer_mmap_sync_with_rings(ctx, shader->kernel_param, PIPE_TRANSFER_WRITE);
        global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
        local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
        kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
@@ -232,7 +229,7 @@ void evergreen_compute_upload_input(
 
        for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
                                        (shader->input_size / 4); i++) {
-               COMPUTE_DBG("input %i : %i\n", i,
+               COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
                        ((unsigned*)num_work_groups_start)[i]);
        }
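
This hunk carries the fix named in the commit title. kernel_parameters_offset_bytes is already a byte count, so the old "* sizeof(uint)" over-allocated the implicit-parameter block by a factor of four. The pointer arithmetic above implies that block is three uint[3] vectors in front of the user's arguments; a worked sketch, assuming the offset constant is the 36 bytes those vectors occupy:

    /* Implicit parameters prepended to the user's kernel arguments:
     *   [0..2]  grid size in blocks   (num_work_groups_start)
     *   [3..5]  global work size      (global_size_start)
     *   [6..8]  local work size       (local_size_start)
     * i.e. 9 uints = 36 bytes, then input_size bytes of user data.
     *
     * old: buffer_size = input_size + 36 * sizeof(uint) = input_size + 144
     * new: buffer_size = input_size + 36
     */
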
 
@@ -251,7 +248,7 @@ static void evergreen_emit_direct_dispatch(
                const uint *block_layout, const uint *grid_layout)
 {
        int i;
-       struct radeon_winsys_cs *cs = rctx->cs;
+       struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
        unsigned num_waves;
        unsigned num_pipes = rctx->screen->info.r600_max_pipes;
        unsigned wave_divisor = (16 * num_pipes);
@@ -273,7 +270,7 @@ static void evergreen_emit_direct_dispatch(
        num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
                        wave_divisor - 1) / wave_divisor;
 
-       COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
+       COMPUTE_DBG(rctx->screen, "Using %u pipes, there are %u wavefronts per thread block\n",
                                                        num_pipes, num_waves);
 
        /* XXX: Partition the LDS between PS/CS.  By default half (4096 dwords
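
num_waves is the number of threads in one block divided by wave_divisor (16 times r600_max_pipes), rounded up. A worked example, assuming an 8-pipe part:

    /* block_layout = {16, 16, 1}, num_pipes = 8
     * wave_divisor = 16 * 8 = 128
     * num_waves = (16*16*1 + 128 - 1) / 128 = 383 / 128 = 2
     */
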
@@ -314,28 +311,33 @@ static void evergreen_emit_direct_dispatch(
 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
                const uint *grid_layout)
 {
-       struct radeon_winsys_cs *cs = ctx->cs;
+       struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
        unsigned flush_flags = 0;
        int i;
-
        struct r600_resource *onebo = NULL;
        struct evergreen_compute_resource *resources =
                                        ctx->cs_shader_state.shader->resources;
 
+       /* make sure that the gfx ring is only one active */
+       if (ctx->rings.dma.cs) {
+               ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+       }
+
        /* Initialize all the compute-related registers.
         *
         * See evergreen_init_atom_start_compute_cs() in this file for the list
         * of registers initialized by the start_compute_cs_cmd atom.
         */
-       r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);
+       r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
 
-       ctx->flags |= R600_CONTEXT_CB_FLUSH;
+       ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
        r600_flush_emit(ctx);
 
        /* Emit colorbuffers. */
        for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
                struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
-               unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
+               unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
+                                                      (struct r600_resource*)cb->base.texture,
                                                       RADEON_USAGE_READWRITE);
 
                r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
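
Two threads of change meet in this hunk: the context can now own both a gfx and a dma ring, so pending dma work is kicked off asynchronously before compute is emitted on the gfx ring, and buffer relocations must name the ring whose command stream will reference the BO. A minimal sketch of the reloc idiom this patch uses throughout, with bo standing in for any r600_resource; illustrative only, no API beyond what the hunks already show:

    /* Add bo to the gfx ring's relocation list, then emit the returned
     * index after a NOP packet so the submission layer can patch in the
     * buffer's address for the preceding register writes. */
    unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx, bo,
                                           RADEON_USAGE_READWRITE);
    r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
    r600_write_value(cs, reloc);
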
@@ -374,7 +376,7 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
        for (i = 0; i < get_compute_resource_num(); i++) {
                if (resources[i].enabled) {
                        int j;
-                       COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
+                       COMPUTE_DBG(ctx->screen, "resnum: %i, cdw: %i\n", i, cs->cdw);
 
                        for (j = 0; j < resources[i].cs_end; j++) {
                                if (resources[i].do_reloc[j]) {
@@ -409,13 +411,13 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 
        /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
         */
-       ctx->flags |= R600_CONTEXT_CB_FLUSH;
+       ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
        r600_flush_emit(ctx);
 
 #if 0
-       COMPUTE_DBG("cdw: %i\n", cs->cdw);
+       COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
        for (i = 0; i < cs->cdw; i++) {
-               COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
+               COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, ctx->cs->buf[i]);
        }
 #endif
 
@@ -424,20 +426,15 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
                flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
        }
 
-       ctx->ws->cs_flush(ctx->cs, flush_flags);
+       ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags);
 
-       ctx->pm4_dirty_cdwords = 0;
        ctx->flags = 0;
 
-       COMPUTE_DBG("shader started\n");
+       COMPUTE_DBG(ctx->screen, "shader started\n");
 
        ctx->ws->buffer_wait(onebo->buf, 0);
 
-       COMPUTE_DBG("...\n");
-
-       ctx->streamout_start = TRUE;
-       ctx->streamout_append_bitmask = ~0;
-
+       COMPUTE_DBG(ctx->screen, "...\n");
 }
 
 
@@ -452,7 +449,7 @@ void evergreen_emit_cs_shader(
                                        (struct r600_cs_shader_state*)atom;
        struct r600_pipe_compute *shader = state->shader;
        struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
-       struct radeon_winsys_cs *cs = rctx->cs;
+       struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
        uint64_t va;
 
        va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
@@ -465,10 +462,10 @@ void evergreen_emit_cs_shader(
        r600_write_value(cs, 0);        /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
 
        r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
-       r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
-                                                       RADEON_USAGE_READ));
+       r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
+                                                       kernel->code_bo, RADEON_USAGE_READ));
 
-       rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
+       rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
 }
 
 static void evergreen_launch_grid(
@@ -479,7 +476,7 @@ static void evergreen_launch_grid(
        struct r600_context *ctx = (struct r600_context *)ctx_;
 
 #ifdef HAVE_OPENCL 
-       COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
+       COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
 
        struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
        if (!shader->kernels[pc].code_bo) {
@@ -488,8 +485,7 @@ static void evergreen_launch_grid(
                r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
                kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
                                                        kernel->bc.ndw * 4);
-               p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
-                                                       PIPE_TRANSFER_WRITE);
+               p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
                memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
                ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
        }
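
Direct ws->buffer_map() calls, which only knew about the single gfx CS, are replaced by r600_buffer_mmap_sync_with_rings(), which has to consider both rings before handing out a CPU pointer. A minimal sketch of the behaviour assumed from such a helper; names, fields, and flush arguments are illustrative, not the driver's exact code:

    static void *map_synced(struct r600_context *ctx,
                            struct r600_resource *rbuffer, unsigned usage)
    {
            if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
                    /* Assumed: kick off any work still queued on either
                     * ring that might touch the buffer before mapping. */
                    ctx->rings.gfx.flush(ctx, 0);
                    if (ctx->rings.dma.cs)
                            ctx->rings.dma.flush(ctx, 0);
            }
            return ctx->ws->buffer_map(rbuffer->cs_buf, ctx->rings.gfx.cs, usage);
    }
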
@@ -507,7 +503,7 @@ static void evergreen_set_compute_resources(struct pipe_context * ctx_,
        struct r600_context *ctx = (struct r600_context *)ctx_;
        struct r600_surface **resources = (struct r600_surface **)surfaces;
 
-       COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
+       COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
                        start, count);
 
        for (int i = 0; i < count; i++) {
@@ -580,7 +576,7 @@ static void evergreen_set_global_binding(
        struct r600_resource_global **buffers =
                (struct r600_resource_global **)resources;
 
-       COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
+       COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
                        first, n);
 
        if (!resources) {
@@ -681,8 +677,12 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
        }
 
        /* Config Registers */
-       evergreen_init_common_regs(cb, ctx->chip_class
-                       , ctx->family, ctx->screen->info.drm_minor);
+       if (ctx->chip_class < CAYMAN)
+               evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
+                                          ctx->screen->info.drm_minor);
+       else
+               cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
+                                       ctx->screen->info.drm_minor);
 
        /* The primitive type always needs to be POINTLIST for compute. */
        r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
@@ -800,18 +800,22 @@ struct pipe_resource *r600_compute_global_buffer_create(
        struct pipe_screen *screen,
        const struct pipe_resource *templ)
 {
+       struct r600_resource_global* result = NULL;
+       struct r600_screen* rscreen = NULL;
+       int size_in_dw = 0;
+
        assert(templ->target == PIPE_BUFFER);
        assert(templ->bind & PIPE_BIND_GLOBAL);
        assert(templ->array_size == 1 || templ->array_size == 0);
        assert(templ->depth0 == 1 || templ->depth0 == 0);
        assert(templ->height0 == 1 || templ->height0 == 0);
 
-       struct r600_resource_global* result = (struct r600_resource_global*)
-               CALLOC(sizeof(struct r600_resource_global), 1);
-       struct r600_screen* rscreen = (struct r600_screen*)screen;
+       result = (struct r600_resource_global*)
+       CALLOC(sizeof(struct r600_resource_global), 1);
+       rscreen = (struct r600_screen*)screen;
 
-       COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
-       COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
+       COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+       COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
                        templ->array_size);
 
        result->base.b.vtbl = &r600_global_buffer_vtbl;
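
The declaration hoists in this hunk, and in the destroy and transfer_unmap hunks further down, move the locals above the assert() statements, presumably to keep the bodies valid C89, where declarations must precede the first statement of a block. A minimal illustration of the pattern, not code from the file:

    void example(struct pipe_resource *res)
    {
            struct r600_resource_global *buffer = NULL; /* declarations first */

            assert(res->target == PIPE_BUFFER);         /* statements after */
            buffer = (struct r600_resource_global*)res;
            (void)buffer;
    }
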
@@ -819,7 +823,7 @@ struct pipe_resource *r600_compute_global_buffer_create(
        result->base.b.b = *templ;
        pipe_reference_init(&result->base.b.b.reference, 1);
 
-       int size_in_dw = (templ->width0+3) / 4;
+       size_in_dw = (templ->width0+3) / 4;
 
        result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
 
@@ -836,11 +840,14 @@ void r600_compute_global_buffer_destroy(
        struct pipe_screen *screen,
        struct pipe_resource *res)
 {
+       struct r600_resource_global* buffer = NULL;
+       struct r600_screen* rscreen = NULL;
+
        assert(res->target == PIPE_BUFFER);
        assert(res->bind & PIPE_BIND_GLOBAL);
 
-       struct r600_resource_global* buffer = (struct r600_resource_global*)res;
-       struct r600_screen* rscreen = (struct r600_screen*)screen;
+       buffer = (struct r600_resource_global*)res;
+       rscreen = (struct r600_screen*)screen;
 
        compute_memory_free(rscreen->global_pool, buffer->chunk->id);
 
@@ -867,7 +874,7 @@ void *r600_compute_global_transfer_map(
 
        assert(resource->target == PIPE_BUFFER);
 
-       COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
+       COMPUTE_DBG(rctx->screen, "* r600_compute_global_get_transfer()\n"
                        "level = %u, usage = %u, box(x = %u, y = %u, z = %u "
                        "width = %u, height = %u, depth = %u)\n", level, usage,
                        box->x, box->y, box->z, box->width, box->height,
@@ -888,17 +895,16 @@ void *r600_compute_global_transfer_map(
 
        ///TODO: do it better, mapping is not possible if the pool is too big
 
-       COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
+       COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n");
 
-       if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
-                                               rctx->cs, transfer->usage))) {
+       if (!(map = r600_buffer_mmap_sync_with_rings(rctx, buffer->chunk->pool->bo, transfer->usage))) {
                util_slab_free(&rctx->pool_transfers, transfer);
                return NULL;
        }
 
        *ptransfer = transfer;
 
-       COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
+       COMPUTE_DBG(rctx->screen, "Buffer: %p + %u (buffer offset in global memory) "
                "+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
        return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
 }
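
The returned pointer assumes map is a pointer to 32-bit words at the start of the global pool mapping: adding chunk->start_in_dw advances in 4-byte units to the chunk's base, and box.x is then added in bytes after the cast. A worked example with illustrative values:

    /* start_in_dw = 16, box.x = 8:
     * (char*)(map + 16) + 8  ==  pool base + 64 + 8 bytes */
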
@@ -907,14 +913,16 @@ void r600_compute_global_transfer_unmap(
        struct pipe_context *ctx_,
        struct pipe_transfer* transfer)
 {
+       struct r600_context *ctx = NULL;
+       struct r600_resource_global* buffer = NULL;
+
        assert(transfer->resource->target == PIPE_BUFFER);
        assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
 
-       struct r600_context *ctx = (struct r600_context *)ctx_;
-       struct r600_resource_global* buffer =
-               (struct r600_resource_global*)transfer->resource;
+       ctx = (struct r600_context *)ctx_;
+       buffer = (struct r600_resource_global*)transfer->resource;
 
-       COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
+       COMPUTE_DBG(ctx->screen, "* r600_compute_global_transfer_unmap()\n");
 
        ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
        util_slab_free(&ctx->pool_transfers, transfer);