nouveau: Add support for SV_WORK_DIM
[mesa.git] src/gallium/drivers/r600/evergreen_compute.c
index 6abb77f676ca6d3ba2b76baddc56f4a46185a488..8db525e0bf6844ef4fb1d4d07c03f0d46ba76da1 100644
@@ -177,16 +177,6 @@ static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
        rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
 }
 
-static const struct u_resource_vtbl r600_global_buffer_vtbl =
-{
-       u_default_resource_get_handle, /* get_handle */
-       r600_compute_global_buffer_destroy, /* resource_destroy */
-       r600_compute_global_transfer_map, /* transfer_map */
-       r600_compute_global_transfer_flush_region,/* transfer_flush_region */
-       r600_compute_global_transfer_unmap, /* transfer_unmap */
-       r600_compute_global_transfer_inline_write /* transfer_inline_write */
-};
-
 /* We need to define these R600 registers here, because we can't include
  * evergreend.h and r600d.h.
  */
@@ -251,10 +241,10 @@ static void r600_destroy_shader(struct r600_bytecode *bc)
        FREE(bc->bytecode);
 }
 
-void *evergreen_create_compute_state(struct pipe_context *ctx_,
-                                    const const struct pipe_compute_state *cso)
+static void *evergreen_create_compute_state(struct pipe_context *ctx,
+                                           const struct pipe_compute_state *cso)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
 #ifdef HAVE_OPENCL
        const struct pipe_llvm_program_header *header;
@@ -269,9 +259,11 @@ void *evergreen_create_compute_state(struct pipe_context *ctx_,
        radeon_elf_read(code, header->num_bytes, &shader->binary);
        r600_create_shader(&shader->bc, &shader->binary, &use_kill);
 
+       /* Upload code + ROdata */
        shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
                                                        shader->bc.ndw * 4);
        p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+       /* TODO: use util_memcpy_cpu_to_le32? */
        memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
        rctx->b.ws->buffer_unmap(shader->code_bo->buf);
 #endif
@@ -284,9 +276,9 @@ void *evergreen_create_compute_state(struct pipe_context *ctx_,
        return shader;
 }
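
For orientation, here is a hedged sketch of how a state tracker reaches the hook above through the Gallium interface. struct pipe_compute_state and the create/bind entry points are the real API; bind_kernel, binary_bytes and input_bytes are invented for the example, and the ir_type depends on the build.

```c
#include "pipe/p_context.h"
#include "pipe/p_state.h"

/* Illustrative only: binding a compute binary through the hooks this
 * diff makes static.  binary_bytes is assumed to start with the
 * pipe_llvm_program_header that evergreen_create_compute_state() reads. */
static void *bind_kernel(struct pipe_context *ctx,
                         const void *binary_bytes, /* header + code */
                         unsigned input_bytes)     /* kernel argument size */
{
        struct pipe_compute_state cso = {0};
        void *cs;

        cso.ir_type = PIPE_SHADER_IR_NATIVE; /* illustrative choice */
        cso.prog = binary_bytes;
        cso.req_local_mem = 0;           /* no shared memory in this example */
        cso.req_input_mem = input_bytes;

        cs = ctx->create_compute_state(ctx, &cso);
        ctx->bind_compute_state(ctx, cs);
        return cs;
}
```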
 
-void evergreen_delete_compute_state(struct pipe_context *ctx_, void *state)
+static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_compute *shader = state;
 
        COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n");
@@ -302,9 +294,9 @@ void evergreen_delete_compute_state(struct pipe_context *ctx_, void *state)
        FREE(shader);
 }
 
-static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
+static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
 
        COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n");
 
@@ -322,12 +314,10 @@ static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
  *             (x,y,z)
  * DWORDS 9+ : Kernel parameters
  */
-void evergreen_compute_upload_input(struct pipe_context *ctx_,
-                                   const uint *block_layout,
-                                   const uint *grid_layout,
-                                   const void *input)
+static void evergreen_compute_upload_input(struct pipe_context *ctx,
+                                          const struct pipe_grid_info *info)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
        unsigned i;
        /* We need to reserve 9 dwords (36 bytes) for implicit kernel
@@ -348,12 +338,12 @@ void evergreen_compute_upload_input(struct pipe_context *ctx_,
        if (!shader->kernel_param) {
                /* Add space for the grid dimensions */
                shader->kernel_param = (struct r600_resource *)
-                       pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
+                       pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM,
                                        PIPE_USAGE_IMMUTABLE, input_size);
        }
 
        u_box_1d(0, input_size, &box);
-       num_work_groups_start = ctx_->transfer_map(ctx_,
+       num_work_groups_start = ctx->transfer_map(ctx,
                        (struct pipe_resource*)shader->kernel_param,
                        0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
                        &box, &transfer);
@@ -362,34 +352,33 @@ void evergreen_compute_upload_input(struct pipe_context *ctx_,
        kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
 
        /* Copy the grid size (the number of work groups) */
-       memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
+       memcpy(num_work_groups_start, info->grid, 3 * sizeof(uint));
 
        /* Copy the global size */
        for (i = 0; i < 3; i++) {
-               global_size_start[i] = grid_layout[i] * block_layout[i];
+               global_size_start[i] = info->grid[i] * info->block[i];
        }
 
        /* Copy the local dimensions */
-       memcpy(local_size_start, block_layout, 3 * sizeof(uint));
+       memcpy(local_size_start, info->block, 3 * sizeof(uint));
 
        /* Copy the kernel inputs */
-       memcpy(kernel_parameters_start, input, shader->input_size);
+       memcpy(kernel_parameters_start, info->input, shader->input_size);
 
        for (i = 0; i < (input_size / 4); i++) {
                COMPUTE_DBG(rctx->screen, "input %i : %u\n", i,
                        ((unsigned*)num_work_groups_start)[i]);
        }
 
-       ctx_->transfer_unmap(ctx_, transfer);
+       ctx->transfer_unmap(ctx, transfer);
 
        /* ID=0 is reserved for the parameters */
        evergreen_cs_set_constant_buffer(rctx, 0, 0, input_size,
                        (struct pipe_resource*)shader->kernel_param);
 }
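
The buffer this function fills can be restated as a struct. The type below is purely illustrative, since the driver addresses these dwords through pointer arithmetic, but the offsets follow directly from the code above and the DWORD layout comment earlier in the file.

```c
#include <stdint.h>

/* Hypothetical view of the 9-dword (36-byte) implicit header that
 * evergreen_compute_upload_input() writes in front of the user's
 * kernel arguments. */
struct eg_implicit_kernel_args {
        uint32_t num_work_groups[3]; /* DWORDS 0-2: info->grid */
        uint32_t global_size[3];     /* DWORDS 3-5: info->grid[i] * info->block[i] */
        uint32_t local_size[3];      /* DWORDS 6-8: info->block */
        /* DWORDS 9+: kernel parameters, copied from info->input */
};
```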
 
-static void evergreen_emit_direct_dispatch(struct r600_context *rctx,
-                                          const uint *block_layout,
-                                          const uint *grid_layout)
+static void evergreen_emit_dispatch(struct r600_context *rctx,
+                                   const struct pipe_grid_info *info)
 {
        int i;
        struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
@@ -405,15 +394,15 @@ static void evergreen_emit_direct_dispatch(struct r600_context *rctx,
 
        /* Calculate group_size/grid_size */
        for (i = 0; i < 3; i++) {
-               group_size *= block_layout[i];
+               group_size *= info->block[i];
        }
 
        for (i = 0; i < 3; i++) {
-               grid_size *= grid_layout[i];
+               grid_size *= info->grid[i];
        }
 
        /* num_waves = ceil((tg_size.x * tg_size.y * tg_size.z) / (16 * num_pipes)) */
-       num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
+       num_waves = (info->block[0] * info->block[1] * info->block[2] +
                        wave_divisor - 1) / wave_divisor;
 
        COMPUTE_DBG(rctx->screen, "Using %u pipes, "
@@ -432,9 +421,9 @@ static void evergreen_emit_direct_dispatch(struct r600_context *rctx,
                                                                group_size);
 
        radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
-       radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
-       radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
-       radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+       radeon_emit(cs, info->block[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+       radeon_emit(cs, info->block[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+       radeon_emit(cs, info->block[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
 
        if (rctx->b.chip_class < CAYMAN) {
                assert(lds_size <= 8192);
@@ -449,22 +438,21 @@ static void evergreen_emit_direct_dispatch(struct r600_context *rctx,
 
        /* Dispatch packet */
        radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-       radeon_emit(cs, grid_layout[0]);
-       radeon_emit(cs, grid_layout[1]);
-       radeon_emit(cs, grid_layout[2]);
+       radeon_emit(cs, info->grid[0]);
+       radeon_emit(cs, info->grid[1]);
+       radeon_emit(cs, info->grid[2]);
        /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
        radeon_emit(cs, 1);
 }
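
To make the wave math concrete, here is a worked example under assumed numbers; the real num_pipes value comes from the hardware info, and 2 is used here purely for illustration.

```c
#include <stdio.h>

/* Worked example of the num_waves formula from the comment above:
 * num_waves = ceil(block.x * block.y * block.z / (16 * num_pipes)). */
int main(void)
{
        unsigned num_pipes = 2;                 /* assumed, not queried */
        unsigned block[3] = {16, 16, 1};        /* 256 threads per group */
        unsigned wave_divisor = 16 * num_pipes; /* 32 */
        unsigned threads = block[0] * block[1] * block[2];
        unsigned num_waves = (threads + wave_divisor - 1) / wave_divisor;

        printf("num_waves = %u\n", num_waves);  /* prints num_waves = 8 */
        return 0;
}
```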
 
 static void compute_emit_cs(struct r600_context *rctx,
-                           const uint *block_layout,
-                           const uint *grid_layout)
+                           const struct pipe_grid_info *info)
 {
        struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
        unsigned i;
 
        /* make sure that the gfx ring is the only ring active */
-       if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
+       if (radeon_emitted(rctx->b.dma.cs, 0)) {
                rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
        }
 
@@ -535,7 +523,7 @@ static void compute_emit_cs(struct r600_context *rctx,
        r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
 
        /* Emit dispatch state and dispatch packet */
-       evergreen_emit_direct_dispatch(rctx, block_layout, grid_layout);
+       evergreen_emit_dispatch(rctx, info);
 
        /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
         */
@@ -546,14 +534,14 @@ static void compute_emit_cs(struct r600_context *rctx,
        rctx->b.flags = 0;
 
        if (rctx->b.chip_class >= CAYMAN) {
-               cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
-               cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);
+               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+               radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
                /* DEALLOC_STATE prevents the GPU from hanging when a
                 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
                 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
                 */
-               cs->buf[cs->cdw++] = PKT3C(PKT3_DEALLOC_STATE, 0, 0);
-               cs->buf[cs->cdw++] = 0;
+               radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
+               radeon_emit(cs, 0);
        }
 
 #if 0
@@ -598,10 +586,10 @@ void evergreen_emit_cs_shader(struct r600_context *rctx,
                                              RADEON_PRIO_USER_SHADER));
 }
 
-static void evergreen_launch_grid(struct pipe_context *ctx_,
+static void evergreen_launch_grid(struct pipe_context *ctx,
                                  const struct pipe_grid_info *info)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
 #ifdef HAVE_OPENCL
        struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
        boolean use_kill;
@@ -615,24 +603,24 @@ static void evergreen_launch_grid(struct pipe_context *ctx_,
        COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc);
 
 
-       evergreen_compute_upload_input(ctx_, info->block, info->grid, info->input);
-       compute_emit_cs(rctx, info->block, info->grid);
+       evergreen_compute_upload_input(ctx, info);
+       compute_emit_cs(rctx, info);
 }
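
From the caller's side, the reworked interface is driven as below. pipe_grid_info and pipe_context::launch_grid are the real Gallium API this series adapts to; the block/grid sizes and the inputs pointer are invented for the example.

```c
#include "pipe/p_context.h"
#include "pipe/p_state.h"

/* Illustrative dispatch.  The driver, not the caller, prepends the
 * 9-dword implicit header sketched earlier. */
static void run_kernel(struct pipe_context *ctx, const void *inputs)
{
        struct pipe_grid_info info = {0};

        info.block[0] = 64;  /* threads per work group */
        info.block[1] = 1;
        info.block[2] = 1;
        info.grid[0] = 16;   /* number of work groups */
        info.grid[1] = 1;
        info.grid[2] = 1;
        info.pc = 0;         /* entry point offset into the code */
        info.input = inputs; /* user kernel arguments */

        ctx->launch_grid(ctx, &info);
}
```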
 
-static void evergreen_set_compute_resources(struct pipe_context *ctx_,
+static void evergreen_set_compute_resources(struct pipe_context *ctx,
                                            unsigned start, unsigned count,
                                            struct pipe_surface **surfaces)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
        struct r600_surface **resources = (struct r600_surface **)surfaces;
 
        COMPUTE_DBG(rctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
                        start, count);
 
        for (unsigned i = 0; i < count; i++) {
-               /* The First two vertex buffers are reserved for parameters and
+               /* The first three vertex buffers are reserved for parameters and
                 * global buffers. */
-               unsigned vtx_id = 2 + i;
+               unsigned vtx_id = 3 + i;
                if (resources[i]) {
                        struct r600_resource_global *buffer =
                                (struct r600_resource_global*)
@@ -653,12 +641,12 @@ static void evergreen_set_compute_resources(struct pipe_context *ctx_,
        }
 }
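
The reservation that grows from two slots to three can be summarized with hypothetical names; the driver itself hardcodes the indices, as the hunk shows.

```c
/* Hypothetical labels for the reserved compute vertex-buffer slots;
 * compare evergreen_set_global_binding() below, which fills slots 1-2. */
enum eg_cs_vtx_slot {
        EG_CS_VTX_PARAMS     = 0, /* kernel parameter buffer */
        EG_CS_VTX_GLOBAL     = 1, /* global memory pool, for reads */
        EG_CS_VTX_CONSTANTS  = 2, /* constants kept in the code/text segment */
        EG_CS_VTX_FIRST_USER = 3, /* hence vtx_id = 3 + i in the loop above */
};
```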
 
-static void evergreen_set_global_binding(struct pipe_context *ctx_,
+static void evergreen_set_global_binding(struct pipe_context *ctx,
                                         unsigned first, unsigned n,
                                         struct pipe_resource **resources,
                                         uint32_t **handles)
 {
-       struct r600_context *rctx = (struct r600_context *)ctx_;
+       struct r600_context *rctx = (struct r600_context *)ctx;
        struct compute_memory_pool *pool = rctx->screen->global_pool;
        struct r600_resource_global **buffers =
                (struct r600_resource_global **)resources;
@@ -681,7 +669,7 @@ static void evergreen_set_global_binding(struct pipe_context *ctx_,
                        buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
        }
 
-       if (compute_memory_finalize_pending(pool, ctx_) == -1) {
+       if (compute_memory_finalize_pending(pool, ctx) == -1) {
                /* XXX: Unset */
                return;
        }
@@ -699,9 +687,15 @@ static void evergreen_set_global_binding(struct pipe_context *ctx_,
                *(handles[i]) = util_cpu_to_le32(handle);
        }
 
+       /* globals for writing */
        evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
+       /* globals for reading */
        evergreen_cs_set_vertex_buffer(rctx, 1, 0,
                                (struct pipe_resource*)pool->bo);
+
+       /* constants for reading, LLVM puts them in text segment */
+       evergreen_cs_set_vertex_buffer(rctx, 2, 0,
+                               (struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
 }
 
 /**
@@ -908,71 +902,14 @@ void evergreen_init_compute_state_functions(struct r600_context *rctx)
 
 }
 
-struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen,
-                                                       const struct pipe_resource *templ)
-{
-       struct r600_resource_global* result = NULL;
-       struct r600_screen* rscreen = NULL;
-       int size_in_dw = 0;
-
-       assert(templ->target == PIPE_BUFFER);
-       assert(templ->bind & PIPE_BIND_GLOBAL);
-       assert(templ->array_size == 1 || templ->array_size == 0);
-       assert(templ->depth0 == 1 || templ->depth0 == 0);
-       assert(templ->height0 == 1 || templ->height0 == 0);
-
-       result = (struct r600_resource_global*)
-       CALLOC(sizeof(struct r600_resource_global), 1);
-       rscreen = (struct r600_screen*)screen;
-
-       COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
-       COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
-                       templ->array_size);
-
-       result->base.b.vtbl = &r600_global_buffer_vtbl;
-       result->base.b.b = *templ;
-       result->base.b.b.screen = screen;
-       pipe_reference_init(&result->base.b.b.reference, 1);
-
-       size_in_dw = (templ->width0+3) / 4;
-
-       result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
-
-       if (result->chunk == NULL)
-       {
-               free(result);
-               return NULL;
-       }
-
-       return &result->base.b.b;
-}
-
-void r600_compute_global_buffer_destroy(struct pipe_screen *screen,
-                                       struct pipe_resource *res)
-{
-       struct r600_resource_global* buffer = NULL;
-       struct r600_screen* rscreen = NULL;
-
-       assert(res->target == PIPE_BUFFER);
-       assert(res->bind & PIPE_BIND_GLOBAL);
-
-       buffer = (struct r600_resource_global*)res;
-       rscreen = (struct r600_screen*)screen;
-
-       compute_memory_free(rscreen->global_pool, buffer->chunk->id);
-
-       buffer->chunk = NULL;
-       free(res);
-}
-
-void *r600_compute_global_transfer_map(struct pipe_context *ctx_,
-                                      struct pipe_resource *resource,
-                                      unsigned level,
-                                      unsigned usage,
-                                      const struct pipe_box *box,
-                                      struct pipe_transfer **ptransfer)
+static void *r600_compute_global_transfer_map(struct pipe_context *ctx,
+                                             struct pipe_resource *resource,
+                                             unsigned level,
+                                             unsigned usage,
+                                             const struct pipe_box *box,
+                                             struct pipe_transfer **ptransfer)
 {
-       struct r600_context *rctx = (struct r600_context*)ctx_;
+       struct r600_context *rctx = (struct r600_context*)ctx;
        struct compute_memory_pool *pool = rctx->screen->global_pool;
        struct r600_resource_global* buffer =
                (struct r600_resource_global*)resource;
@@ -982,7 +919,7 @@ void *r600_compute_global_transfer_map(struct pipe_context *ctx_,
        unsigned offset = box->x;
 
        if (is_item_in_pool(item)) {
-               compute_memory_demote_item(pool, item, ctx_);
+               compute_memory_demote_item(pool, item, ctx);
        }
        else {
                if (item->real_buffer == NULL) {
@@ -1012,12 +949,12 @@ void *r600_compute_global_transfer_map(struct pipe_context *ctx_,
        assert(box->z == 0);
 
        ///TODO: do it better, mapping is not possible if the pool is too big
-       return pipe_buffer_map_range(ctx_, dst,
+       return pipe_buffer_map_range(ctx, dst,
                        offset, box->width, usage, ptransfer);
 }
 
-void r600_compute_global_transfer_unmap(struct pipe_context *ctx_,
-                                       struct pipe_transfer *transfer)
+static void r600_compute_global_transfer_unmap(struct pipe_context *ctx,
+                                              struct pipe_transfer *transfer)
 {
        /* struct r600_resource_global are not real resources, they just map
         * to an offset within the compute memory pool.  The function
@@ -1032,21 +969,88 @@ void r600_compute_global_transfer_unmap(struct pipe_context *ctx_,
        assert (!"This function should not be called");
 }
 
-void r600_compute_global_transfer_flush_region(struct pipe_context *ctx_,
-                                              struct pipe_transfer *transfer,
-                                              const struct pipe_box *box)
+static void r600_compute_global_transfer_flush_region(struct pipe_context *ctx,
+                                                     struct pipe_transfer *transfer,
+                                                     const struct pipe_box *box)
 {
        assert(0 && "TODO");
 }
 
-void r600_compute_global_transfer_inline_write(struct pipe_context *pipe,
-                                              struct pipe_resource *resource,
-                                              unsigned level,
-                                              unsigned usage,
-                                              const struct pipe_box *box,
-                                              const void *data,
-                                              unsigned stride,
-                                              unsigned layer_stride)
+static void r600_compute_global_transfer_inline_write(struct pipe_context *pipe,
+                                                     struct pipe_resource *resource,
+                                                     unsigned level,
+                                                     unsigned usage,
+                                                     const struct pipe_box *box,
+                                                     const void *data,
+                                                     unsigned stride,
+                                                     unsigned layer_stride)
 {
        assert(0 && "TODO");
 }
+
+static void r600_compute_global_buffer_destroy(struct pipe_screen *screen,
+                                              struct pipe_resource *res)
+{
+       struct r600_resource_global* buffer = NULL;
+       struct r600_screen* rscreen = NULL;
+
+       assert(res->target == PIPE_BUFFER);
+       assert(res->bind & PIPE_BIND_GLOBAL);
+
+       buffer = (struct r600_resource_global*)res;
+       rscreen = (struct r600_screen*)screen;
+
+       compute_memory_free(rscreen->global_pool, buffer->chunk->id);
+
+       buffer->chunk = NULL;
+       free(res);
+}
+
+static const struct u_resource_vtbl r600_global_buffer_vtbl =
+{
+       u_default_resource_get_handle, /* get_handle */
+       r600_compute_global_buffer_destroy, /* resource_destroy */
+       r600_compute_global_transfer_map, /* transfer_map */
+       r600_compute_global_transfer_flush_region,/* transfer_flush_region */
+       r600_compute_global_transfer_unmap, /* transfer_unmap */
+       r600_compute_global_transfer_inline_write /* transfer_inline_write */
+};
+
+struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen,
+                                                       const struct pipe_resource *templ)
+{
+       struct r600_resource_global* result = NULL;
+       struct r600_screen* rscreen = NULL;
+       int size_in_dw = 0;
+
+       assert(templ->target == PIPE_BUFFER);
+       assert(templ->bind & PIPE_BIND_GLOBAL);
+       assert(templ->array_size == 1 || templ->array_size == 0);
+       assert(templ->depth0 == 1 || templ->depth0 == 0);
+       assert(templ->height0 == 1 || templ->height0 == 0);
+
+       result = (struct r600_resource_global*)
+       CALLOC(sizeof(struct r600_resource_global), 1);
+       rscreen = (struct r600_screen*)screen;
+
+       COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+       COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
+                       templ->array_size);
+
+       result->base.b.vtbl = &r600_global_buffer_vtbl;
+       result->base.b.b = *templ;
+       result->base.b.b.screen = screen;
+       pipe_reference_init(&result->base.b.b.reference, 1);
+
+       size_in_dw = (templ->width0+3) / 4;
+
+       result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
+
+       if (result->chunk == NULL)
+       {
+               free(result);
+               return NULL;
+       }
+
+       return &result->base.b.b;
+}
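
Finally, a hedged example of reaching this constructor: resource_create is the standard pipe_screen entry point, and the template fields mirror the asserts above. The 4096-byte size and the R8 format are arbitrary choices for the sketch.

```c
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

/* Illustrative creation of an OpenCL __global buffer.  The driver
 * rounds width0 up to dwords: (4096 + 3) / 4 = 1024 dwords. */
static struct pipe_resource *create_global_buf(struct pipe_screen *screen)
{
        struct pipe_resource templ = {0};

        templ.target = PIPE_BUFFER;
        templ.bind = PIPE_BIND_GLOBAL;
        templ.usage = PIPE_USAGE_DEFAULT;
        templ.format = PIPE_FORMAT_R8_UNORM; /* raw byte buffer */
        templ.width0 = 4096;                 /* size in bytes */
        templ.height0 = 1;
        templ.depth0 = 1;
        templ.array_size = 1;

        return screen->resource_create(screen, &templ);
}
```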