X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fr600%2Fevergreen_compute.c;h=292b5e32afd3675d8c5d7580d227ce65e14b9594;hb=1da538f85bc327f4ae5e1a5b90c15b99f8cf48df;hp=3bb3895bf7348b5f73341dc3eb2a11a9e2b6b373;hpb=92af184690995d3b16731518f7becfaac3538edb;p=mesa.git diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c index 3bb3895bf73..292b5e32afd 100644 --- a/src/gallium/drivers/r600/evergreen_compute.c +++ b/src/gallium/drivers/r600/evergreen_compute.c @@ -30,7 +30,7 @@ #include "pipe/p_state.h" #include "pipe/p_context.h" #include "util/u_blitter.h" -#include "util/u_double_list.h" +#include "util/list.h" #include "util/u_transfer.h" #include "util/u_surface.h" #include "util/u_pack_color.h" @@ -38,19 +38,16 @@ #include "util/u_inlines.h" #include "util/u_framebuffer.h" #include "pipebuffer/pb_buffer.h" -#include "r600.h" #include "evergreend.h" -#include "r600_resource.h" #include "r600_shader.h" #include "r600_pipe.h" #include "r600_formats.h" #include "evergreen_compute.h" -#include "r600_hw_context_priv.h" #include "evergreen_compute_internal.h" #include "compute_memory_pool.h" -#ifdef HAVE_OPENCL -#include "llvm_wrapper.h" -#endif +#include "sb/sb_public.h" +#include "radeon/radeon_elf_util.h" +#include /** RAT0 is for global binding write @@ -83,127 +80,228 @@ writable images will consume TEX slots, VTX slots too because of linear indexing */ -const struct u_resource_vtbl r600_global_buffer_vtbl = +struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen, + unsigned size) { - u_default_resource_get_handle, /* get_handle */ - r600_compute_global_buffer_destroy, /* resource_destroy */ - r600_compute_global_get_transfer, /* get_transfer */ - r600_compute_global_transfer_destroy, /* transfer_destroy */ - r600_compute_global_transfer_map, /* transfer_map */ - r600_compute_global_transfer_flush_region,/* transfer_flush_region */ - r600_compute_global_transfer_unmap, /* transfer_unmap */ - r600_compute_global_transfer_inline_write /* transfer_inline_write */ -}; + struct pipe_resource *buffer = NULL; + assert(size); + buffer = pipe_buffer_create((struct pipe_screen*) screen, + PIPE_BIND_CUSTOM, + PIPE_USAGE_IMMUTABLE, + size); -void *evergreen_create_compute_state( - struct pipe_context *ctx_, - const const struct pipe_compute_state *cso) + return (struct r600_resource *)buffer; +} + + +static void evergreen_set_rat(struct r600_pipe_compute *pipe, + unsigned id, + struct r600_resource *bo, + int start, + int size) { - struct r600_context *ctx = (struct r600_context *)ctx_; + struct pipe_surface rat_templ; + struct r600_surface *surf = NULL; + struct r600_context *rctx = NULL; + + assert(id < 12); + assert((size & 3) == 0); + assert((start & 0xFF) == 0); + + rctx = pipe->ctx; + + COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id); + + /* Create the RAT surface */ + memset(&rat_templ, 0, sizeof(rat_templ)); + rat_templ.format = PIPE_FORMAT_R32_UINT; + rat_templ.u.tex.level = 0; + rat_templ.u.tex.first_layer = 0; + rat_templ.u.tex.last_layer = 0; + + /* Add the RAT the list of color buffers */ + pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface( + (struct pipe_context *)pipe->ctx, + (struct pipe_resource *)bo, &rat_templ); + + /* Update the number of color buffers */ + pipe->ctx->framebuffer.state.nr_cbufs = + MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs); + + /* Update the cb_target_mask + * XXX: I think this is a potential spot for bugs once we start doing + * 
GL interop. cb_target_mask may be modified in the 3D sections + * of this driver. */ + pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4)); + + surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id]; + evergreen_init_color_surface_rat(rctx, surf); +} -#ifdef HAVE_OPENCL - const struct pipe_llvm_program_header * header; - const unsigned char * code; +static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx, + unsigned vb_index, + unsigned offset, + struct pipe_resource *buffer) +{ + struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state; + struct pipe_vertex_buffer *vb = &state->vb[vb_index]; + vb->stride = 1; + vb->buffer_offset = offset; + vb->buffer = buffer; + vb->user_buffer = NULL; + + /* The vertex instructions in the compute shaders use the texture cache, + * so we need to invalidate it. */ + rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE; + state->enabled_mask |= 1 << vb_index; + state->dirty_mask |= 1 << vb_index; + r600_mark_atom_dirty(rctx, &state->atom); +} - header = cso->prog; - code = cso->prog + sizeof(struct pipe_llvm_program_header); -#endif +static void evergreen_cs_set_constant_buffer(struct r600_context *rctx, + unsigned cb_index, + unsigned offset, + unsigned size, + struct pipe_resource *buffer) +{ + struct pipe_constant_buffer cb; + cb.buffer_size = size; + cb.buffer_offset = offset; + cb.buffer = buffer; + cb.user_buffer = NULL; - if (!ctx->screen->screen.get_param(&ctx->screen->screen, - PIPE_CAP_COMPUTE)) { - fprintf(stderr, "Compute is not supported\n"); - return NULL; - } - struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute); + rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb); +} - shader->ctx = (struct r600_context*)ctx; - shader->resources = (struct evergreen_compute_resource*) - CALLOC(sizeof(struct evergreen_compute_resource), - get_compute_resource_num()); - shader->local_size = cso->req_local_mem; ///TODO: assert it - shader->private_size = cso->req_private_mem; - shader->input_size = cso->req_input_mem; +/* We need to define these R600 registers here, because we can't include + * evergreend.h and r600d.h. 
+ */ +#define R_028868_SQ_PGM_RESOURCES_VS 0x028868 +#define R_028850_SQ_PGM_RESOURCES_PS 0x028850 -#ifdef HAVE_OPENCL - shader->mod = llvm_parse_bitcode(code, header->num_bytes); +#ifdef HAVE_OPENCL - r600_compute_shader_create(ctx_, shader->mod, &shader->bc); -#endif - return shader; +static void r600_shader_binary_read_config(const struct radeon_shader_binary *binary, + struct r600_bytecode *bc, + uint64_t symbol_offset, + boolean *use_kill) +{ + unsigned i; + const unsigned char *config = + radeon_shader_binary_config_start(binary, symbol_offset); + + for (i = 0; i < binary->config_size_per_symbol; i+= 8) { + unsigned reg = + util_le32_to_cpu(*(uint32_t*)(config + i)); + unsigned value = + util_le32_to_cpu(*(uint32_t*)(config + i + 4)); + switch (reg) { + /* R600 / R700 */ + case R_028850_SQ_PGM_RESOURCES_PS: + case R_028868_SQ_PGM_RESOURCES_VS: + /* Evergreen / Northern Islands */ + case R_028844_SQ_PGM_RESOURCES_PS: + case R_028860_SQ_PGM_RESOURCES_VS: + case R_0288D4_SQ_PGM_RESOURCES_LS: + bc->ngpr = MAX2(bc->ngpr, G_028844_NUM_GPRS(value)); + bc->nstack = MAX2(bc->nstack, G_028844_STACK_SIZE(value)); + break; + case R_02880C_DB_SHADER_CONTROL: + *use_kill = G_02880C_KILL_ENABLE(value); + break; + case R_0288E8_SQ_LDS_ALLOC: + bc->nlds_dw = value; + break; + } + } } -void evergreen_delete_compute_state(struct pipe_context *ctx, void* state) +static unsigned r600_create_shader(struct r600_bytecode *bc, + const struct radeon_shader_binary *binary, + boolean *use_kill) + { - struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state; + assert(binary->code_size % 4 == 0); + bc->bytecode = CALLOC(1, binary->code_size); + memcpy(bc->bytecode, binary->code, binary->code_size); + bc->ndw = binary->code_size / 4; - free(shader->resources); - free(shader); + r600_shader_binary_read_config(binary, bc, 0, use_kill); + return 0; } -static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state) +#endif + +static void r600_destroy_shader(struct r600_bytecode *bc) { - struct r600_context *ctx = (struct r600_context *)ctx_; + FREE(bc->bytecode); +} - ctx->cs_shader = (struct r600_pipe_compute *)state; +static void *evergreen_create_compute_state(struct pipe_context *ctx, + const struct pipe_compute_state *cso) +{ + struct r600_context *rctx = (struct r600_context *)ctx; + struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute); +#ifdef HAVE_OPENCL + const struct pipe_llvm_program_header *header; + const char *code; + void *p; + boolean use_kill; - assert(!ctx->cs_shader->shader_code_bo); + COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n"); + header = cso->prog; + code = cso->prog + sizeof(struct pipe_llvm_program_header); + radeon_shader_binary_init(&shader->binary); + radeon_elf_read(code, header->num_bytes, &shader->binary); + r600_create_shader(&shader->bc, &shader->binary, &use_kill); + + /* Upload code + ROdata */ + shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen, + shader->bc.ndw * 4); + p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE); + //TODO: use util_memcpy_cpu_to_le32 ? 
+ memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4); + rctx->b.ws->buffer_unmap(shader->code_bo->buf); +#endif - ctx->cs_shader->shader_code_bo = - r600_compute_buffer_alloc_vram(ctx->screen, - ctx->cs_shader->bc.ndw * 4); + shader->ctx = rctx; + shader->local_size = cso->req_local_mem; + shader->private_size = cso->req_private_mem; + shader->input_size = cso->req_input_mem; - void *p = ctx->ws->buffer_map(ctx->cs_shader->shader_code_bo->cs_buf, - ctx->cs, PIPE_TRANSFER_WRITE); + return shader; +} - memcpy(p, ctx->cs_shader->bc.bytecode, ctx->cs_shader->bc.ndw * 4); +static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state) +{ + struct r600_context *rctx = (struct r600_context *)ctx; + struct r600_pipe_compute *shader = state; - ctx->ws->buffer_unmap(ctx->cs_shader->shader_code_bo->cs_buf); + COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n"); - evergreen_compute_init_config(ctx); + if (!shader) + return; - struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader, - COMPUTE_RESOURCE_SHADER, 0); + radeon_shader_binary_clean(&shader->binary); + r600_destroy_shader(&shader->bc); - if (ctx->chip_class < CAYMAN) { - evergreen_reg_set(res, R_008C0C_SQ_GPR_RESOURCE_MGMT_3, - S_008C0C_NUM_LS_GPRS(ctx->cs_shader->bc.ngpr)); - } + /* TODO destroy shader->code_bo, shader->const_bo + * we'll need something like r600_buffer_free */ + FREE(shader); +} - ///maybe we can use it later - evergreen_reg_set(res, R_0286C8_SPI_THREAD_GROUPING, 0); - ///maybe we can use it later - evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0); - - evergreen_reg_set(res, R_0288D4_SQ_PGM_RESOURCES_LS, - S_0288D4_NUM_GPRS(ctx->cs_shader->bc.ngpr) - | S_0288D4_STACK_SIZE(ctx->cs_shader->bc.nstack)); - evergreen_reg_set(res, R_0288D8_SQ_PGM_RESOURCES_LS_2, 0); - - evergreen_reg_set(res, R_0288D0_SQ_PGM_START_LS, 0); - res->bo = ctx->cs_shader->shader_code_bo; - res->usage = RADEON_USAGE_READ; - res->coher_bo_size = ctx->cs_shader->bc.ndw*4; - res->flags = COMPUTE_RES_SH_FLUSH; - - /* We can't always determine the - * number of iterations in a loop before it's executed, - * so we just need to set up the loop counter to give us the maximum - * number of iterations possible. Currently, loops in shader code - * ignore the loop counter and use a break instruction to exit the - * loop at the correct time. - */ - evergreen_set_loop_const(ctx->cs_shader, - 0, /* index */ - 0xFFF, /* Maximum value of the loop counter (i.e. when the loop - * counter reaches this value, the program will break - * out of the loop. */ - 0x0, /* Starting value of the loop counter. */ - 0x1); /* Amount to increment the loop counter each iteration. */ +static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state) +{ + struct r600_context *rctx = (struct r600_context *)ctx; + + COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n"); + + rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state; } /* The kernel parameters are stored a vtx buffer (ID=0), besides the explicit - * kernel parameters there are inplicit parameters that need to be stored + * kernel parameters there are implicit parameters that need to be stored * in the vertex buffer as well. 
Here is how these parameters are organized in * the buffer: * @@ -213,420 +311,549 @@ static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state) * (x,y,z) * DWORDS 9+ : Kernel parameters */ -void evergreen_compute_upload_input( - struct pipe_context *ctx_, - const uint *block_layout, - const uint *grid_layout, - const void *input) +static void evergreen_compute_upload_input(struct pipe_context *ctx, + const struct pipe_grid_info *info) { - struct r600_context *ctx = (struct r600_context *)ctx_; - int i; - unsigned kernel_parameters_offset_bytes = 36; - uint32_t * num_work_groups_start; - uint32_t * global_size_start; - uint32_t * local_size_start; - uint32_t * kernel_parameters_start; - - if (ctx->cs_shader->input_size == 0) { + struct r600_context *rctx = (struct r600_context *)ctx; + struct r600_pipe_compute *shader = rctx->cs_shader_state.shader; + unsigned i; + /* We need to reserve 9 dwords (36 bytes) for implicit kernel + * parameters. + */ + unsigned input_size = shader->input_size + 36; + uint32_t *num_work_groups_start; + uint32_t *global_size_start; + uint32_t *local_size_start; + uint32_t *kernel_parameters_start; + struct pipe_box box; + struct pipe_transfer *transfer = NULL; + + if (shader->input_size == 0) { return; } - if (!ctx->cs_shader->kernel_param) { - unsigned buffer_size = ctx->cs_shader->input_size; - + if (!shader->kernel_param) { /* Add space for the grid dimensions */ - buffer_size += kernel_parameters_offset_bytes * sizeof(uint); - ctx->cs_shader->kernel_param = - r600_compute_buffer_alloc_vram(ctx->screen, - buffer_size); + shader->kernel_param = (struct r600_resource *) + pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, + PIPE_USAGE_IMMUTABLE, input_size); } - num_work_groups_start = ctx->ws->buffer_map( - ctx->cs_shader->kernel_param->cs_buf, - ctx->cs, PIPE_TRANSFER_WRITE); + u_box_1d(0, input_size, &box); + num_work_groups_start = ctx->transfer_map(ctx, + (struct pipe_resource*)shader->kernel_param, + 0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE, + &box, &transfer); global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4)); local_size_start = global_size_start + (3 * (sizeof(uint)) / 4); kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4); /* Copy the work group size */ - memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint)); + memcpy(num_work_groups_start, info->grid, 3 * sizeof(uint)); /* Copy the global size */ for (i = 0; i < 3; i++) { - global_size_start[i] = grid_layout[i] * block_layout[i]; + global_size_start[i] = info->grid[i] * info->block[i]; } /* Copy the local dimensions */ - memcpy(local_size_start, block_layout, 3 * sizeof(uint)); + memcpy(local_size_start, info->block, 3 * sizeof(uint)); /* Copy the kernel inputs */ - memcpy(kernel_parameters_start, input, ctx->cs_shader->input_size); + memcpy(kernel_parameters_start, info->input, shader->input_size); - for (i = 0; i < (kernel_parameters_offset_bytes / 4) + - (ctx->cs_shader->input_size / 4); i++) { - COMPUTE_DBG("input %i : %i\n", i, + for (i = 0; i < (input_size / 4); i++) { + COMPUTE_DBG(rctx->screen, "input %i : %u\n", i, ((unsigned*)num_work_groups_start)[i]); } - ctx->ws->buffer_unmap(ctx->cs_shader->kernel_param->cs_buf); + ctx->transfer_unmap(ctx, transfer); - ///ID=0 is reserved for the parameters - evergreen_set_vtx_resource(ctx->cs_shader, - ctx->cs_shader->kernel_param, 0, 0, 0); - ///ID=0 is reserved for parameters - evergreen_set_const_cache(ctx->cs_shader, 0, - ctx->cs_shader->kernel_param, 
ctx->cs_shader->input_size, 0); + /* ID=0 and ID=3 are reserved for the parameters. + * LLVM will preferably use ID=0, but it does not work for dynamic + * indices. */ + evergreen_cs_set_vertex_buffer(rctx, 3, 0, + (struct pipe_resource*)shader->kernel_param); + evergreen_cs_set_constant_buffer(rctx, 0, 0, input_size, + (struct pipe_resource*)shader->kernel_param); } -void evergreen_direct_dispatch( - struct pipe_context *ctx_, - const uint *block_layout, const uint *grid_layout) +static void evergreen_emit_dispatch(struct r600_context *rctx, + const struct pipe_grid_info *info) { - struct r600_context *ctx = (struct r600_context *)ctx_; - int i; + struct radeon_winsys_cs *cs = rctx->b.gfx.cs; + struct r600_pipe_compute *shader = rctx->cs_shader_state.shader; + unsigned num_waves; + unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes; + unsigned wave_divisor = (16 * num_pipes); + int group_size = 1; + int grid_size = 1; + unsigned lds_size = shader->local_size / 4 + + shader->bc.nlds_dw; - struct evergreen_compute_resource* res = get_empty_res(ctx->cs_shader, - COMPUTE_RESOURCE_DISPATCH, 0); - evergreen_reg_set(res, R_008958_VGT_PRIMITIVE_TYPE, V_008958_DI_PT_POINTLIST); + /* Calculate group_size/grid_size */ + for (i = 0; i < 3; i++) { + group_size *= info->block[i]; + } - evergreen_reg_set(res, R_00899C_VGT_COMPUTE_START_X, 0); - evergreen_reg_set(res, R_0089A0_VGT_COMPUTE_START_Y, 0); - evergreen_reg_set(res, R_0089A4_VGT_COMPUTE_START_Z, 0); + for (i = 0; i < 3; i++) { + grid_size *= info->grid[i]; + } - evergreen_reg_set(res, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, block_layout[0]); - evergreen_reg_set(res, R_0286F0_SPI_COMPUTE_NUM_THREAD_Y, block_layout[1]); - evergreen_reg_set(res, R_0286F4_SPI_COMPUTE_NUM_THREAD_Z, block_layout[2]); + /* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */ + num_waves = (info->block[0] * info->block[1] * info->block[2] + + wave_divisor - 1) / wave_divisor; - int group_size = 1; + COMPUTE_DBG(rctx->screen, "Using %u pipes, " + "%u wavefronts per thread block, " + "allocating %u dwords lds.\n", + num_pipes, num_waves, lds_size); - int grid_size = 1; + radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size); - for (i = 0; i < 3; i++) { - group_size *= block_layout[i]; - } + radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3); + radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */ + radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */ + radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */ - for (i = 0; i < 3; i++) { - grid_size *= grid_layout[i]; + radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, + group_size); + + radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3); + radeon_emit(cs, info->block[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */ + radeon_emit(cs, info->block[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */ + radeon_emit(cs, info->block[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */ + + if (rctx->b.chip_class < CAYMAN) { + assert(lds_size <= 8192); + } else { + /* Cayman appears to have a slightly smaller limit, see the + * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */ + assert(lds_size <= 8160); } - evergreen_reg_set(res, R_008970_VGT_NUM_INDICES, group_size); - evergreen_reg_set(res, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE, group_size); + radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC, + lds_size | (num_waves << 14)); - evergreen_emit_raw_value(res, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0)); - evergreen_emit_raw_value(res, grid_layout[0]); - 
evergreen_emit_raw_value(res, grid_layout[1]); - evergreen_emit_raw_value(res, grid_layout[2]); - ///VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN - evergreen_emit_raw_value(res, 1); + /* Dispatch packet */ + radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0)); + radeon_emit(cs, info->grid[0]); + radeon_emit(cs, info->grid[1]); + radeon_emit(cs, info->grid[2]); + /* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */ + radeon_emit(cs, 1); } -static void compute_emit_cs(struct r600_context *ctx) +static void compute_emit_cs(struct r600_context *rctx, + const struct pipe_grid_info *info) { - struct radeon_winsys_cs *cs = ctx->cs; - int i; + struct radeon_winsys_cs *cs = rctx->b.gfx.cs; + unsigned i; - r600_emit_atom(ctx, &ctx->start_cs_cmd.atom); + /* make sure that the gfx ring is only one active */ + if (radeon_emitted(rctx->b.dma.cs, 0)) { + rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL); + } - struct r600_resource *onebo = NULL; + /* Initialize all the compute-related registers. + * + * See evergreen_init_atom_start_compute_cs() in this file for the list + * of registers initialized by the start_compute_cs_cmd atom. + */ + r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd); + + /* emit config state */ + if (rctx->b.chip_class == EVERGREEN) + r600_emit_atom(rctx, &rctx->config_state.atom); + + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; + r600_flush_emit(rctx); + + /* Emit colorbuffers. */ + /* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */ + for (i = 0; i < 8 && i < rctx->framebuffer.state.nr_cbufs; i++) { + struct r600_surface *cb = (struct r600_surface*)rctx->framebuffer.state.cbufs[i]; + unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, + (struct r600_resource*)cb->base.texture, + RADEON_USAGE_READWRITE, + RADEON_PRIO_SHADER_RW_BUFFER); + + radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7); + radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */ + radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */ + radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */ + radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */ + radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */ + radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */ + radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */ + + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */ + radeon_emit(cs, reloc); + + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */ + radeon_emit(cs, reloc); + } + for (; i < 8 ; i++) + radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, + S_028C70_FORMAT(V_028C70_COLOR_INVALID)); + for (; i < 12; i++) + radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C, + S_028C70_FORMAT(V_028C70_COLOR_INVALID)); - for (i = 0; i < get_compute_resource_num(); i++) { - if (ctx->cs_shader->resources[i].enabled) { - int j; - COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw); + /* Set CB_TARGET_MASK XXX: Use cb_misc_state */ + radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK, + rctx->compute_cb_target_mask); - for (j = 0; j < ctx->cs_shader->resources[i].cs_end; j++) { - if (ctx->cs_shader->resources[i].do_reloc[j]) { - assert(ctx->cs_shader->resources[i].bo); - evergreen_emit_ctx_reloc(ctx, - ctx->cs_shader->resources[i].bo, - ctx->cs_shader->resources[i].usage); - } - cs->buf[cs->cdw++] = ctx->cs_shader->resources[i].cs[j]; - } + /* 
Emit vertex buffer state */ + rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask); + r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom); - if (ctx->cs_shader->resources[i].bo) { - onebo = ctx->cs_shader->resources[i].bo; - evergreen_emit_ctx_reloc(ctx, - ctx->cs_shader->resources[i].bo, - ctx->cs_shader->resources[i].usage); - - ///special case for textures - if (ctx->cs_shader->resources[i].do_reloc - [ctx->cs_shader->resources[i].cs_end] == 2) { - evergreen_emit_ctx_reloc(ctx, - ctx->cs_shader->resources[i].bo, - ctx->cs_shader->resources[i].usage); - } - - evergreen_set_buffer_sync(ctx, ctx->cs_shader->resources[i].bo, - ctx->cs_shader->resources[i].coher_bo_size, - ctx->cs_shader->resources[i].flags, - ctx->cs_shader->resources[i].usage); - } - } - } + /* Emit constant buffer state */ + r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom); -#if 0 - COMPUTE_DBG("cdw: %i\n", cs->cdw); - for (i = 0; i < cs->cdw; i++) { - COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]); - } -#endif + /* Emit sampler state */ + r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom); - ctx->ws->cs_flush(ctx->cs, RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE); + /* Emit sampler view (texture resource) state */ + r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom); - ctx->pm4_dirty_cdwords = 0; - ctx->flags = 0; + /* Emit compute shader state */ + r600_emit_atom(rctx, &rctx->cs_shader_state.atom); - COMPUTE_DBG("shader started\n"); + /* Emit dispatch state and dispatch packet */ + evergreen_emit_dispatch(rctx, info); - ctx->ws->buffer_wait(onebo->buf, 0); + /* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff + */ + rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE | + R600_CONTEXT_INV_VERTEX_CACHE | + R600_CONTEXT_INV_TEX_CACHE; + r600_flush_emit(rctx); + rctx->b.flags = 0; + + if (rctx->b.chip_class >= CAYMAN) { + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4)); + /* DEALLOC_STATE prevents the GPU from hanging when a + * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT + * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set. 
+ */ + radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0)); + radeon_emit(cs, 0); + } - COMPUTE_DBG("...\n"); +#if 0 + COMPUTE_DBG(rctx->screen, "cdw: %i\n", cs->cdw); + for (i = 0; i < cs->cdw; i++) { + COMPUTE_DBG(rctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]); + } +#endif - r600_emit_atom(ctx, &ctx->start_cs_cmd.atom); +} - ctx->streamout_start = TRUE; - ctx->streamout_append_bitmask = ~0; +/** + * Emit function for r600_cs_shader_state atom + */ +void evergreen_emit_cs_shader(struct r600_context *rctx, + struct r600_atom *atom) +{ + struct r600_cs_shader_state *state = + (struct r600_cs_shader_state*)atom; + struct r600_pipe_compute *shader = state->shader; + struct radeon_winsys_cs *cs = rctx->b.gfx.cs; + uint64_t va; + struct r600_resource *code_bo; + unsigned ngpr, nstack; + + code_bo = shader->code_bo; + va = shader->code_bo->gpu_address + state->pc; + ngpr = shader->bc.ngpr; + nstack = shader->bc.nstack; + + radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3); + radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */ + radeon_emit(cs, /* R_0288D4_SQ_PGM_RESOURCES_LS */ + S_0288D4_NUM_GPRS(ngpr) + | S_0288D4_STACK_SIZE(nstack)); + radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */ + + radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0)); + radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, + code_bo, RADEON_USAGE_READ, + RADEON_PRIO_USER_SHADER)); } -static void evergreen_launch_grid( - struct pipe_context *ctx_, - const uint *block_layout, const uint *grid_layout, - uint32_t pc, const void *input) +static void evergreen_launch_grid(struct pipe_context *ctx, + const struct pipe_grid_info *info) { - COMPUTE_DBG("PC: %i\n", pc); + struct r600_context *rctx = (struct r600_context *)ctx; +#ifdef HAVE_OPENCL + struct r600_pipe_compute *shader = rctx->cs_shader_state.shader; + boolean use_kill; - struct r600_context *ctx = (struct r600_context *)ctx_; - unsigned num_waves; - unsigned num_pipes = ctx->screen->info.r600_max_pipes; - unsigned wave_divisor = (16 * num_pipes); + rctx->cs_shader_state.pc = info->pc; + /* Get the config information for this kernel. 
*/ + r600_shader_binary_read_config(&shader->binary, &shader->bc, + info->pc, &use_kill); +#endif - /* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */ - num_waves = (block_layout[0] * block_layout[1] * block_layout[2] + - wave_divisor - 1) / wave_divisor; + COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc); - COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n", - num_pipes, num_waves); - evergreen_set_lds(ctx->cs_shader, 0, 0, num_waves); - evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input); - evergreen_direct_dispatch(ctx_, block_layout, grid_layout); - compute_emit_cs(ctx); + evergreen_compute_upload_input(ctx, info); + compute_emit_cs(rctx, info); } -static void evergreen_set_compute_resources(struct pipe_context * ctx_, - unsigned start, unsigned count, - struct pipe_surface ** surfaces) +static void evergreen_set_compute_resources(struct pipe_context *ctx, + unsigned start, unsigned count, + struct pipe_surface **surfaces) { - struct r600_context *ctx = (struct r600_context *)ctx_; + struct r600_context *rctx = (struct r600_context *)ctx; struct r600_surface **resources = (struct r600_surface **)surfaces; - for (int i = 0; i < count; i++) { + + COMPUTE_DBG(rctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n", + start, count); + + for (unsigned i = 0; i < count; i++) { + /* The First four vertex buffers are reserved for parameters and + * global buffers. */ + unsigned vtx_id = 4 + i; if (resources[i]) { struct r600_resource_global *buffer = - (struct r600_resource_global*)resources[i]->base.texture; + (struct r600_resource_global*) + resources[i]->base.texture; if (resources[i]->base.writable) { assert(i+1 < 12); - struct r600_resource_global *buffer = - (struct r600_resource_global*) - resources[i]->base.texture; - evergreen_set_rat(ctx->cs_shader, i+1, + evergreen_set_rat(rctx->cs_shader_state.shader, i+1, (struct r600_resource *)resources[i]->base.texture, buffer->chunk->start_in_dw*4, resources[i]->base.texture->width0); } - evergreen_set_vtx_resource(ctx->cs_shader, - (struct r600_resource *)resources[i]->base.texture, i+2, - buffer->chunk->start_in_dw*4, resources[i]->base.writable); - } - } - -} - -static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_, - unsigned start_slot, unsigned count, - struct pipe_sampler_view **views) -{ - struct r600_context *ctx = (struct r600_context *)ctx_; - struct r600_pipe_sampler_view **resource = - (struct r600_pipe_sampler_view **)views; - - for (int i = 0; i < count; i++) { - if (resource[i]) { - assert(i+1 < 12); - ///FETCH0 = VTX0 (param buffer), - //FETCH1 = VTX1 (global buffer pool), FETCH2... 
= TEX - evergreen_set_tex_resource(ctx->cs_shader, resource[i], i+2); - } - } -} - -static void evergreen_bind_compute_sampler_states( - struct pipe_context *ctx_, - unsigned start_slot, - unsigned num_samplers, - void **samplers_) -{ - struct r600_context *ctx = (struct r600_context *)ctx_; - struct compute_sampler_state ** samplers = - (struct compute_sampler_state **)samplers_; - - for (int i = 0; i < num_samplers; i++) { - if (samplers[i]) { - evergreen_set_sampler_resource(ctx->cs_shader, samplers[i], i); + evergreen_cs_set_vertex_buffer(rctx, vtx_id, + buffer->chunk->start_in_dw * 4, + resources[i]->base.texture); } } } -static void evergreen_set_global_binding( - struct pipe_context *ctx_, unsigned first, unsigned n, - struct pipe_resource **resources, - uint32_t **handles) +static void evergreen_set_global_binding(struct pipe_context *ctx, + unsigned first, unsigned n, + struct pipe_resource **resources, + uint32_t **handles) { - struct r600_context *ctx = (struct r600_context *)ctx_; - struct compute_memory_pool *pool = ctx->screen->global_pool; + struct r600_context *rctx = (struct r600_context *)ctx; + struct compute_memory_pool *pool = rctx->screen->global_pool; struct r600_resource_global **buffers = (struct r600_resource_global **)resources; + unsigned i; + + COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n", + first, n); if (!resources) { /* XXX: Unset */ return; } - compute_memory_finalize_pending(pool, ctx_); + /* We mark these items for promotion to the pool if they + * aren't already there */ + for (i = first; i < first + n; i++) { + struct compute_memory_item *item = buffers[i]->chunk; - for (int i = 0; i < n; i++) + if (!is_item_in_pool(item)) + buffers[i]->chunk->status |= ITEM_FOR_PROMOTING; + } + + if (compute_memory_finalize_pending(pool, ctx) == -1) { + /* XXX: Unset */ + return; + } + + for (i = first; i < first + n; i++) { + uint32_t buffer_offset; + uint32_t handle; assert(resources[i]->target == PIPE_BUFFER); assert(resources[i]->bind & PIPE_BIND_GLOBAL); - *(handles[i]) = buffers[i]->chunk->start_in_dw * 4; + buffer_offset = util_le32_to_cpu(*(handles[i])); + handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4; + + *(handles[i]) = util_cpu_to_le32(handle); } - evergreen_set_rat(ctx->cs_shader, 0, pool->bo, 0, pool->size_in_dw * 4); - evergreen_set_vtx_resource(ctx->cs_shader, pool->bo, 1, 0, 1); -} + /* globals for writing */ + evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4); + /* globals for reading */ + evergreen_cs_set_vertex_buffer(rctx, 1, 0, + (struct pipe_resource*)pool->bo); + /* constants for reading, LLVM puts them in text segment */ + evergreen_cs_set_vertex_buffer(rctx, 2, 0, + (struct pipe_resource*)rctx->cs_shader_state.shader->code_bo); +} -void evergreen_compute_init_config(struct r600_context *ctx) +/** + * This function initializes all the compute specific registers that need to + * be initialized for each compute command stream. Registers that are common + * to both compute and 3D will be initialized at the beginning of each compute + * command stream by the start_cs_cmd atom. However, since the SET_CONTEXT_REG + * packet requires that the shader type bit be set, we must initialize all + * context registers needed for compute in this function. The registers + * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the + * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending + * on the GPU family. 
+ */ +void evergreen_init_atom_start_compute_cs(struct r600_context *rctx) { - struct evergreen_compute_resource* res = - get_empty_res(ctx->cs_shader, COMPUTE_RESOURCE_CONFIG, 0); - + struct r600_command_buffer *cb = &rctx->start_compute_cs_cmd; int num_threads; int num_stack_entries; - int num_temp_gprs; - enum radeon_family family; - unsigned tmp; + /* since all required registers are initialized in the + * start_compute_cs_cmd atom, we can EMIT_EARLY here. + */ + r600_init_command_buffer(cb, 256); + cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE; + + /* This must be first. */ + r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); + r600_store_value(cb, 0x80000000); + r600_store_value(cb, 0x80000000); - family = ctx->family; + /* We're setting config registers here. */ + r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); + r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - switch (family) { + switch (rctx->b.family) { case CHIP_CEDAR: default: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; case CHIP_REDWOOD: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; case CHIP_JUNIPER: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 512; break; case CHIP_CYPRESS: case CHIP_HEMLOCK: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 512; break; case CHIP_PALM: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; case CHIP_SUMO: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; case CHIP_SUMO2: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 512; break; case CHIP_BARTS: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 512; break; case CHIP_TURKS: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; case CHIP_CAICOS: - num_temp_gprs = 4; num_threads = 128; num_stack_entries = 256; break; } - tmp = 0x00000000; - switch (family) { - case CHIP_CEDAR: - case CHIP_PALM: - case CHIP_SUMO: - case CHIP_SUMO2: - case CHIP_CAICOS: - break; - default: - tmp |= S_008C00_VC_ENABLE(1); - break; + /* Config Registers */ + if (rctx->b.chip_class < CAYMAN) + evergreen_init_common_regs(rctx, cb, rctx->b.chip_class, rctx->b.family, + rctx->screen->b.info.drm_minor); + else + cayman_init_common_regs(cb, rctx->b.chip_class, rctx->b.family, + rctx->screen->b.info.drm_minor); + + /* The primitive type always needs to be POINTLIST for compute. */ + r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE, + V_008958_DI_PT_POINTLIST); + + if (rctx->b.chip_class < CAYMAN) { + + /* These registers control which simds can be used by each stage. + * The default for these registers is 0xffffffff, which means + * all simds are available for each stage. It's possible we may + * want to play around with these in the future, but for now + * the default value is fine. + * + * R_008E20_SQ_STATIC_THREAD_MGMT1 + * R_008E24_SQ_STATIC_THREAD_MGMT2 + * R_008E28_SQ_STATIC_THREAD_MGMT3 + */ + + /* XXX: We may need to adjust the thread and stack resource + * values for 3D/compute interop */ + + r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5); + + /* R_008C18_SQ_THREAD_RESOURCE_MGMT_1 + * Set the number of threads used by the PS/VS/GS/ES stage to + * 0. + */ + r600_store_value(cb, 0); + + /* R_008C1C_SQ_THREAD_RESOURCE_MGMT_2 + * Set the number of threads used by the CS (aka LS) stage to + * the maximum number of threads and set the number of threads + * for the HS stage to 0. 
*/ + r600_store_value(cb, S_008C1C_NUM_LS_THREADS(num_threads)); + + /* R_008C20_SQ_STACK_RESOURCE_MGMT_1 + * Set the Control Flow stack entries to 0 for PS/VS stages */ + r600_store_value(cb, 0); + + /* R_008C24_SQ_STACK_RESOURCE_MGMT_2 + * Set the Control Flow stack entries to 0 for GS/ES stages */ + r600_store_value(cb, 0); + + /* R_008C28_SQ_STACK_RESOURCE_MGMT_3 + * Set the Contol Flow stack entries to 0 for the HS stage, and + * set it to the maximum value for the CS (aka LS) stage. */ + r600_store_value(cb, + S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries)); } - tmp |= S_008C00_EXPORT_SRC_C(1); - tmp |= S_008C00_CS_PRIO(0); - tmp |= S_008C00_LS_PRIO(0); - tmp |= S_008C00_HS_PRIO(0); - tmp |= S_008C00_PS_PRIO(0); - tmp |= S_008C00_VS_PRIO(0); - tmp |= S_008C00_GS_PRIO(0); - tmp |= S_008C00_ES_PRIO(0); - - evergreen_reg_set(res, R_008C00_SQ_CONFIG, tmp); - - evergreen_reg_set(res, R_008C04_SQ_GPR_RESOURCE_MGMT_1, - S_008C04_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); - if (ctx->chip_class < CAYMAN) { - evergreen_reg_set(res, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 0); - } - evergreen_reg_set(res, R_008C10_SQ_GLOBAL_GPR_RESOURCE_MGMT_1, 0); - evergreen_reg_set(res, R_008C14_SQ_GLOBAL_GPR_RESOURCE_MGMT_2, 0); - evergreen_reg_set(res, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8)); - - /* workaround for hw issues with dyn gpr - must set all limits to 240 - * instead of 0, 0x1e == 240/8 */ - if (ctx->chip_class < CAYMAN) { - evergreen_reg_set(res, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1, - S_028838_PS_GPRS(0x1e) | - S_028838_VS_GPRS(0x1e) | - S_028838_GS_GPRS(0x1e) | - S_028838_ES_GPRS(0x1e) | - S_028838_HS_GPRS(0x1e) | - S_028838_LS_GPRS(0x1e)); + /* Give the compute shader all the available LDS space. + * NOTE: This only sets the maximum number of dwords that a compute + * shader can allocate. When a shader is executed, we still need to + * allocate the appropriate amount of LDS dwords using the + * CM_R_0288E8_SQ_LDS_ALLOC register. 
+ */ + if (rctx->b.chip_class < CAYMAN) { + r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT, + S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192)); } else { - evergreen_reg_set(res, 0x286f8, + r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT, + S_0286FC_NUM_PS_LDS(0) | + S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */ + } + + /* Context Registers */ + + if (rctx->b.chip_class < CAYMAN) { + /* workaround for hw issues with dyn gpr - must set all limits + * to 240 instead of 0, 0x1e == 240 / 8 + */ + r600_store_context_reg(cb, R_028838_SQ_DYN_GPR_RESOURCE_LIMIT_1, S_028838_PS_GPRS(0x1e) | S_028838_VS_GPRS(0x1e) | S_028838_GS_GPRS(0x1e) | @@ -635,97 +862,132 @@ void evergreen_compute_init_config(struct r600_context *ctx) S_028838_LS_GPRS(0x1e)); } - if (ctx->chip_class < CAYMAN) { - - evergreen_reg_set(res, R_008E20_SQ_STATIC_THREAD_MGMT1, 0xFFFFFFFF); - evergreen_reg_set(res, R_008E24_SQ_STATIC_THREAD_MGMT2, 0xFFFFFFFF); - evergreen_reg_set(res, R_008E20_SQ_STATIC_THREAD_MGMT1, 0xFFFFFFFF); - evergreen_reg_set(res, R_008E24_SQ_STATIC_THREAD_MGMT2, 0xFFFFFFFF); - evergreen_reg_set(res, R_008E28_SQ_STATIC_THREAD_MGMT3, 0xFFFFFFFF); - evergreen_reg_set(res, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 0); - tmp = S_008C1C_NUM_LS_THREADS(num_threads); - evergreen_reg_set(res, R_008C1C_SQ_THREAD_RESOURCE_MGMT_2, tmp); - evergreen_reg_set(res, R_008C20_SQ_STACK_RESOURCE_MGMT_1, 0); - evergreen_reg_set(res, R_008C24_SQ_STACK_RESOURCE_MGMT_2, 0); - tmp = S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries); - evergreen_reg_set(res, R_008C28_SQ_STACK_RESOURCE_MGMT_3, tmp); - } - evergreen_reg_set(res, R_0286CC_SPI_PS_IN_CONTROL_0, S_0286CC_LINEAR_GRADIENT_ENA(1)); - evergreen_reg_set(res, R_0286D0_SPI_PS_IN_CONTROL_1, 0); - evergreen_reg_set(res, R_0286E4_SPI_PS_IN_CONTROL_2, 0); - evergreen_reg_set(res, R_0286D8_SPI_INPUT_Z, 0); - evergreen_reg_set(res, R_0286E0_SPI_BARYC_CNTL, 1 << 20); - tmp = S_0286E8_TID_IN_GROUP_ENA | S_0286E8_TGID_ENA | S_0286E8_DISABLE_INDEX_PACK; - evergreen_reg_set(res, R_0286E8_SPI_COMPUTE_INPUT_CNTL, tmp); - tmp = S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1); - evergreen_reg_set(res, R_028A40_VGT_GS_MODE, tmp); - evergreen_reg_set(res, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/); - evergreen_reg_set(res, R_028800_DB_DEPTH_CONTROL, 0); - evergreen_reg_set(res, R_02880C_DB_SHADER_CONTROL, 0); - evergreen_reg_set(res, R_028000_DB_RENDER_CONTROL, S_028000_COLOR_DISABLE(1)); - evergreen_reg_set(res, R_02800C_DB_RENDER_OVERRIDE, 0); - evergreen_reg_set(res, R_0286E8_SPI_COMPUTE_INPUT_CNTL, + /* XXX: Investigate setting bit 15, which is FAST_COMPUTE_MODE */ + r600_store_context_reg(cb, R_028A40_VGT_GS_MODE, + S_028A40_COMPUTE_MODE(1) | S_028A40_PARTIAL_THD_AT_EOI(1)); + + r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/); + + r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL, S_0286E8_TID_IN_GROUP_ENA | S_0286E8_TGID_ENA | S_0286E8_DISABLE_INDEX_PACK) ; + + /* The LOOP_CONST registers are an optimizations for loops that allows + * you to store the initial counter, increment value, and maximum + * counter value in a register so that hardware can calculate the + * correct number of iterations for the loop, so that you don't need + * to have the loop counter in your shader code. We don't currently use + * this optimization, so we must keep track of the counter in the + * shader and use a break instruction to exit loops. 
However, the + * hardware will still uses this register to determine when to exit a + * loop, so we need to initialize the counter to 0, set the increment + * value to 1 and the maximum counter value to the 4095 (0xfff) which + * is the maximum value allowed. This gives us a maximum of 4096 + * iterations for our loops, but hopefully our break instruction will + * execute before some time before the 4096th iteration. + */ + eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF); } -void evergreen_init_compute_state_functions(struct r600_context *ctx) +void evergreen_init_compute_state_functions(struct r600_context *rctx) { - ctx->context.create_compute_state = evergreen_create_compute_state; - ctx->context.delete_compute_state = evergreen_delete_compute_state; - ctx->context.bind_compute_state = evergreen_bind_compute_state; -// ctx->context.create_sampler_view = evergreen_compute_create_sampler_view; - ctx->context.set_compute_resources = evergreen_set_compute_resources; - ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view; - ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states; - ctx->context.set_global_binding = evergreen_set_global_binding; - ctx->context.launch_grid = evergreen_launch_grid; -} + rctx->b.b.create_compute_state = evergreen_create_compute_state; + rctx->b.b.delete_compute_state = evergreen_delete_compute_state; + rctx->b.b.bind_compute_state = evergreen_bind_compute_state; +// rctx->context.create_sampler_view = evergreen_compute_create_sampler_view; + rctx->b.b.set_compute_resources = evergreen_set_compute_resources; + rctx->b.b.set_global_binding = evergreen_set_global_binding; + rctx->b.b.launch_grid = evergreen_launch_grid; +} -struct pipe_resource *r600_compute_global_buffer_create( - struct pipe_screen *screen, - const struct pipe_resource *templ) +static void *r600_compute_global_transfer_map(struct pipe_context *ctx, + struct pipe_resource *resource, + unsigned level, + unsigned usage, + const struct pipe_box *box, + struct pipe_transfer **ptransfer) { - assert(templ->target == PIPE_BUFFER); - assert(templ->bind & PIPE_BIND_GLOBAL); - assert(templ->array_size == 1 || templ->array_size == 0); - assert(templ->depth0 == 1 || templ->depth0 == 0); - assert(templ->height0 == 1 || templ->height0 == 0); + struct r600_context *rctx = (struct r600_context*)ctx; + struct compute_memory_pool *pool = rctx->screen->global_pool; + struct r600_resource_global* buffer = + (struct r600_resource_global*)resource; - struct r600_resource_global* result = (struct r600_resource_global*) - CALLOC(sizeof(struct r600_resource_global), 1); - struct r600_screen* rscreen = (struct r600_screen*)screen; + struct compute_memory_item *item = buffer->chunk; + struct pipe_resource *dst = NULL; + unsigned offset = box->x; - result->base.b.vtbl = &r600_global_buffer_vtbl; - result->base.b.b.screen = screen; - result->base.b.b = *templ; - pipe_reference_init(&result->base.b.b.reference, 1); + if (is_item_in_pool(item)) { + compute_memory_demote_item(pool, item, ctx); + } + else { + if (item->real_buffer == NULL) { + item->real_buffer = + r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4); + } + } - int size_in_dw = (templ->width0+3) / 4; + dst = (struct pipe_resource*)item->real_buffer; - result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw); + if (usage & PIPE_TRANSFER_READ) + buffer->chunk->status |= ITEM_MAPPED_FOR_READING; - if (result->chunk == NULL) - { - free(result); - return NULL; - } + 
COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n" + "level = %u, usage = %u, box(x = %u, y = %u, z = %u " + "width = %u, height = %u, depth = %u)\n", level, usage, + box->x, box->y, box->z, box->width, box->height, + box->depth); + COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = " + "%u (box.x)\n", item->id, box->x); - return &result->base.b.b; + + assert(resource->target == PIPE_BUFFER); + assert(resource->bind & PIPE_BIND_GLOBAL); + assert(box->x >= 0); + assert(box->y == 0); + assert(box->z == 0); + + ///TODO: do it better, mapping is not possible if the pool is too big + return pipe_buffer_map_range(ctx, dst, + offset, box->width, usage, ptransfer); } -void r600_compute_global_buffer_destroy( - struct pipe_screen *screen, - struct pipe_resource *res) +static void r600_compute_global_transfer_unmap(struct pipe_context *ctx, + struct pipe_transfer *transfer) { + /* struct r600_resource_global are not real resources, they just map + * to an offset within the compute memory pool. The function + * r600_compute_global_transfer_map() maps the memory pool + * resource rather than the struct r600_resource_global passed to + * it as an argument and then initalizes ptransfer->resource with + * the memory pool resource (via pipe_buffer_map_range). + * When transfer_unmap is called it uses the memory pool's + * vtable which calls r600_buffer_transfer_map() rather than + * this function. + */ + assert (!"This function should not be called"); +} + +static void r600_compute_global_transfer_flush_region(struct pipe_context *ctx, + struct pipe_transfer *transfer, + const struct pipe_box *box) +{ + assert(0 && "TODO"); +} + +static void r600_compute_global_buffer_destroy(struct pipe_screen *screen, + struct pipe_resource *res) +{ + struct r600_resource_global* buffer = NULL; + struct r600_screen* rscreen = NULL; + assert(res->target == PIPE_BUFFER); assert(res->bind & PIPE_BIND_GLOBAL); - struct r600_resource_global* buffer = (struct r600_resource_global*)res; - struct r600_screen* rscreen = (struct r600_screen*)screen; + buffer = (struct r600_resource_global*)res; + rscreen = (struct r600_screen*)screen; compute_memory_free(rscreen->global_pool, buffer->chunk->id); @@ -733,101 +995,50 @@ void r600_compute_global_buffer_destroy( free(res); } -void* r600_compute_global_transfer_map( - struct pipe_context *ctx_, - struct pipe_transfer* transfer) +static const struct u_resource_vtbl r600_global_buffer_vtbl = { - assert(transfer->resource->target == PIPE_BUFFER); - assert(transfer->resource->bind & PIPE_BIND_GLOBAL); - assert(transfer->box.x >= 0); - assert(transfer->box.y == 0); - assert(transfer->box.z == 0); - - struct r600_context *ctx = (struct r600_context *)ctx_; - struct r600_resource_global* buffer = - (struct r600_resource_global*)transfer->resource; - - uint32_t* map; - ///TODO: do it better, mapping is not possible if the pool is too big - - if (!(map = ctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf, - ctx->cs, transfer->usage))) { - return NULL; - } - - COMPUTE_DBG("buffer start: %lli\n", buffer->chunk->start_in_dw); - return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x; -} + u_default_resource_get_handle, /* get_handle */ + r600_compute_global_buffer_destroy, /* resource_destroy */ + r600_compute_global_transfer_map, /* transfer_map */ + r600_compute_global_transfer_flush_region,/* transfer_flush_region */ + r600_compute_global_transfer_unmap, /* transfer_unmap */ +}; -void r600_compute_global_transfer_unmap( - struct pipe_context *ctx_, - struct 
pipe_transfer* transfer) +struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen, + const struct pipe_resource *templ) { - assert(transfer->resource->target == PIPE_BUFFER); - assert(transfer->resource->bind & PIPE_BIND_GLOBAL); + struct r600_resource_global* result = NULL; + struct r600_screen* rscreen = NULL; + int size_in_dw = 0; - struct r600_context *ctx = (struct r600_context *)ctx_; - struct r600_resource_global* buffer = - (struct r600_resource_global*)transfer->resource; + assert(templ->target == PIPE_BUFFER); + assert(templ->bind & PIPE_BIND_GLOBAL); + assert(templ->array_size == 1 || templ->array_size == 0); + assert(templ->depth0 == 1 || templ->depth0 == 0); + assert(templ->height0 == 1 || templ->height0 == 0); - ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf); -} + result = (struct r600_resource_global*) + CALLOC(sizeof(struct r600_resource_global), 1); + rscreen = (struct r600_screen*)screen; -struct pipe_transfer * r600_compute_global_get_transfer( - struct pipe_context *ctx_, - struct pipe_resource *resource, - unsigned level, - unsigned usage, - const struct pipe_box *box) -{ - struct r600_context *ctx = (struct r600_context *)ctx_; - struct compute_memory_pool *pool = ctx->screen->global_pool; + COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n"); + COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0, + templ->array_size); - compute_memory_finalize_pending(pool, ctx_); + result->base.b.vtbl = &r600_global_buffer_vtbl; + result->base.b.b = *templ; + result->base.b.b.screen = screen; + pipe_reference_init(&result->base.b.b.reference, 1); - assert(resource->target == PIPE_BUFFER); - struct r600_context *rctx = (struct r600_context*)ctx_; - struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers); - - transfer->resource = resource; - transfer->level = level; - transfer->usage = usage; - transfer->box = *box; - transfer->stride = 0; - transfer->layer_stride = 0; - transfer->data = NULL; - - /* Note strides are zero, this is ok for buffers, but not for - * textures 2d & higher at least. - */ - return transfer; -} + size_in_dw = (templ->width0+3) / 4; -void r600_compute_global_transfer_destroy( - struct pipe_context *ctx_, - struct pipe_transfer *transfer) -{ - struct r600_context *rctx = (struct r600_context*)ctx_; - util_slab_free(&rctx->pool_transfers, transfer); -} + result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw); -void r600_compute_global_transfer_flush_region( - struct pipe_context *ctx_, - struct pipe_transfer *transfer, - const struct pipe_box *box) -{ - assert(0 && "TODO"); -} + if (result->chunk == NULL) + { + free(result); + return NULL; + } -void r600_compute_global_transfer_inline_write( - struct pipe_context *pipe, - struct pipe_resource *resource, - unsigned level, - unsigned usage, - const struct pipe_box *box, - const void *data, - unsigned stride, - unsigned layer_stride) -{ - assert(0 && "TODO"); + return &result->base.b.b; }
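
Editorial note, not part of the patch above: the implicit kernel-argument layout that the new evergreen_compute_upload_input() writes ahead of the user parameters (9 dwords / 36 bytes: work-group count, global size, local size, then the kernel arguments) can be summarized by the following stand-alone C sketch. The struct and helper names here are invented for illustration; only the layout and the grid*block arithmetic are taken from the diff.

/* Sketch of the implicit parameter header reserved before the kernel
 * arguments in the compute input buffer (see evergreen_compute_upload_input). */
#include <stdint.h>
#include <string.h>

struct cs_input_header {
	uint32_t num_work_groups[3]; /* DWORDS 0-2: info->grid                  */
	uint32_t global_size[3];     /* DWORDS 3-5: info->grid[i] * info->block[i] */
	uint32_t local_size[3];      /* DWORDS 6-8: info->block                 */
	/* DWORDS 9+: kernel parameters follow (shader->input_size bytes)       */
};

static void fill_cs_input(void *dst, const uint32_t grid[3],
			  const uint32_t block[3],
			  const void *params, size_t params_size)
{
	struct cs_input_header hdr;

	for (int i = 0; i < 3; i++) {
		hdr.num_work_groups[i] = grid[i];
		hdr.global_size[i] = grid[i] * block[i];
		hdr.local_size[i] = block[i];
	}
	memcpy(dst, &hdr, sizeof(hdr));                        /* 36 bytes */
	memcpy((char *)dst + sizeof(hdr), params, params_size);
}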