X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fr600%2Fevergreen_compute.c;h=33009c16f688985005bb4b13f93715a41215ccd2;hb=d6fbcf6ee28c273b37bf293aea5faf77253029a3;hp=25263f3b013cc024d08fc541229ba580f740f122;hpb=e5a9bf55231aa14f6ae831a5c47d7176cb6c230b;p=mesa.git

diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 25263f3b013..33009c16f68 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -30,7 +30,7 @@
 #include "pipe/p_state.h"
 #include "pipe/p_context.h"
 #include "util/u_blitter.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
 #include "util/u_transfer.h"
 #include "util/u_surface.h"
 #include "util/u_pack_color.h"
@@ -38,19 +38,19 @@
 #include "util/u_inlines.h"
 #include "util/u_framebuffer.h"
 #include "pipebuffer/pb_buffer.h"
-#include "r600.h"
 #include "evergreend.h"
-#include "r600_resource.h"
 #include "r600_shader.h"
 #include "r600_pipe.h"
 #include "r600_formats.h"
 #include "evergreen_compute.h"
-#include "r600_hw_context_priv.h"
 #include "evergreen_compute_internal.h"
 #include "compute_memory_pool.h"
+#include "sb/sb_public.h"
 #ifdef HAVE_OPENCL
-#include "llvm_wrapper.h"
+#include "radeon/radeon_llvm_util.h"
 #endif
+#include "radeon/radeon_elf_util.h"
+#include <inttypes.h>
 
 /**
 RAT0 is for global binding write
@@ -83,6 +83,68 @@ writable images will consume TEX slots, VTX slots
 too because of linear indexing
 */
 
+struct r600_resource* r600_compute_buffer_alloc_vram(
+	struct r600_screen *screen,
+	unsigned size)
+{
+	struct pipe_resource * buffer = NULL;
+	assert(size);
+
+	buffer = pipe_buffer_create(
+		(struct pipe_screen*) screen,
+		PIPE_BIND_CUSTOM,
+		PIPE_USAGE_IMMUTABLE,
+		size);
+
+	return (struct r600_resource *)buffer;
+}
+
+
+static void evergreen_set_rat(
+	struct r600_pipe_compute *pipe,
+	unsigned id,
+	struct r600_resource* bo,
+	int start,
+	int size)
+{
+	struct pipe_surface rat_templ;
+	struct r600_surface *surf = NULL;
+	struct r600_context *rctx = NULL;
+
+	assert(id < 12);
+	assert((size & 3) == 0);
+	assert((start & 0xFF) == 0);
+
+	rctx = pipe->ctx;
+
+	COMPUTE_DBG(rctx->screen, "bind rat: %i \n", id);
+
+	/* Create the RAT surface */
+	memset(&rat_templ, 0, sizeof(rat_templ));
+	rat_templ.format = PIPE_FORMAT_R32_UINT;
+	rat_templ.u.tex.level = 0;
+	rat_templ.u.tex.first_layer = 0;
+	rat_templ.u.tex.last_layer = 0;
+
+	/* Add the RAT the list of color buffers */
+	pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
+		(struct pipe_context *)pipe->ctx,
+		(struct pipe_resource *)bo, &rat_templ);
+
+	/* Update the number of color buffers */
+	pipe->ctx->framebuffer.state.nr_cbufs =
+		MAX2(id + 1, pipe->ctx->framebuffer.state.nr_cbufs);
+
+	/* Update the cb_target_mask
+	 * XXX: I think this is a potential spot for bugs once we start doing
+	 * GL interop.  cb_target_mask may be modified in the 3D sections
+	 * of this driver. */
+	pipe->ctx->compute_cb_target_mask |= (0xf << (id * 4));
+
+	surf = (struct r600_surface*)pipe->ctx->framebuffer.state.cbufs[id];
+	evergreen_init_color_surface_rat(rctx, surf);
+}
+
 static void evergreen_cs_set_vertex_buffer(
 	struct r600_context * rctx,
 	unsigned vb_index,
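The evergreen_set_rat() helper added above exposes a plain buffer to the compute shader as a Random Access Target by wrapping it in a color-buffer surface; write access is then controlled through CB_TARGET_MASK, where every color buffer / RAT owns a 4-bit channel-enable nibble, which is why the function ORs in 0xf << (id * 4). A minimal sketch of that bit bookkeeping, outside the patch itself (the helper name is made up for illustration):

/* Illustration only: CB_TARGET_MASK contribution of one RAT slot.
 * Each color buffer / RAT gets a 4-bit nibble (one write-enable bit per
 * channel), so RAT "id" owns bits [id*4 .. id*4+3]. */
#include <stdint.h>
#include <assert.h>

static inline uint32_t rat_write_mask(unsigned id)
{
	assert(id < 12);          /* the function above asserts 12 RAT slots */
	return 0xfu << (id * 4);  /* id=0 -> 0x0000000f, id=2 -> 0x00000f00 */
}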
@@ -98,10 +160,26 @@ static void evergreen_cs_set_vertex_buffer(
 	/* The vertex instructions in the compute shaders use the texture cache,
 	 * so we need to invalidate it.
 	 */
-	rctx->flags |= R600_CONTEXT_TEX_FLUSH;
+	rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
 	state->enabled_mask |= 1 << vb_index;
 	state->dirty_mask |= 1 << vb_index;
-	state->atom.dirty = true;
+	r600_mark_atom_dirty(rctx, &state->atom);
+}
+
+static void evergreen_cs_set_constant_buffer(
+	struct r600_context * rctx,
+	unsigned cb_index,
+	unsigned offset,
+	unsigned size,
+	struct pipe_resource * buffer)
+{
+	struct pipe_constant_buffer cb;
+	cb.buffer_size = size;
+	cb.buffer_offset = offset;
+	cb.buffer = buffer;
+	cb.user_buffer = NULL;
+
+	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
 }
 
 static const struct u_resource_vtbl r600_global_buffer_vtbl =
@@ -121,36 +199,49 @@ void *evergreen_create_compute_state(
 {
 	struct r600_context *ctx = (struct r600_context *)ctx_;
 	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
-
 #ifdef HAVE_OPENCL
 	const struct pipe_llvm_program_header * header;
-	const unsigned char * code;
-	unsigned i;
-
-	COMPUTE_DBG("*** evergreen_create_compute_state\n");
+	const char *code;
+	void *p;
+	boolean use_kill;
+	COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
 	header = cso->prog;
 	code = cso->prog + sizeof(struct pipe_llvm_program_header);
+#if HAVE_LLVM < 0x0306
+	(void)use_kill;
+	(void)p;
+	shader->llvm_ctx = LLVMContextCreate();
+	shader->num_kernels = radeon_llvm_get_num_kernels(shader->llvm_ctx,
+				code, header->num_bytes);
+	shader->kernels = CALLOC(sizeof(struct r600_kernel),
+				shader->num_kernels);
+	{
+		unsigned i;
+		for (i = 0; i < shader->num_kernels; i++) {
+			struct r600_kernel *kernel = &shader->kernels[i];
+			kernel->llvm_module = radeon_llvm_get_kernel_module(
+				shader->llvm_ctx, i, code, header->num_bytes);
+		}
+	}
+#else
+	memset(&shader->binary, 0, sizeof(shader->binary));
+	radeon_elf_read(code, header->num_bytes, &shader->binary);
+	r600_create_shader(&shader->bc, &shader->binary, &use_kill);
+
+	shader->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
+							shader->bc.ndw * 4);
+	p = r600_buffer_map_sync_with_rings(&ctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+	memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
+	ctx->b.ws->buffer_unmap(shader->code_bo->cs_buf);
+#endif
 #endif
 
 	shader->ctx = (struct r600_context*)ctx;
-	shader->resources = (struct evergreen_compute_resource*)
-		CALLOC(sizeof(struct evergreen_compute_resource),
-		get_compute_resource_num());
-	shader->local_size = cso->req_local_mem; ///TODO: assert it
+	shader->local_size = cso->req_local_mem;
 	shader->private_size = cso->req_private_mem;
 	shader->input_size = cso->req_input_mem;
 
-#ifdef HAVE_OPENCL
-	shader->num_kernels = llvm_get_num_kernels(code, header->num_bytes);
-	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
-
-	for (i = 0; i < shader->num_kernels; i++) {
-		struct r600_kernel *kernel = &shader->kernels[i];
-		kernel->llvm_module = llvm_get_kernel_module(i, code,
-							header->num_bytes);
-	}
-#endif
 	return shader;
 }
 
@@ -158,21 +249,23 @@ void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
 {
 	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
-	free(shader->resources);
-	free(shader);
+	if (!shader)
+		return;
+
+	FREE(shader);
 }
 
 static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
 {
 	struct r600_context *ctx = (struct r600_context *)ctx_;
 
-	COMPUTE_DBG("*** evergreen_bind_compute_state\n");
+	COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
 
 	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
 }
 
 /* The kernel parameters are stored a vtx buffer (ID=0), besides the explicit
- * kernel parameters there are inplicit parameters that need to be stored
+ * kernel parameters there are implicit parameters that need to be stored
  * in the vertex buffer as well.  Here is how these parameters are organized in
  * the buffer:
 *
@@ -190,28 +283,34 @@ void evergreen_compute_upload_input(
 {
 	struct r600_context *ctx = (struct r600_context *)ctx_;
 	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
-	int i;
-	unsigned kernel_parameters_offset_bytes = 36;
+	unsigned i;
+	/* We need to reserve 9 dwords (36 bytes) for implicit kernel
+	 * parameters.
+	 */
+	unsigned input_size = shader->input_size + 36;
 	uint32_t * num_work_groups_start;
 	uint32_t * global_size_start;
 	uint32_t * local_size_start;
 	uint32_t * kernel_parameters_start;
+	struct pipe_box box;
+	struct pipe_transfer *transfer = NULL;
 
 	if (shader->input_size == 0) {
 		return;
 	}
 
 	if (!shader->kernel_param) {
-		unsigned buffer_size = shader->input_size;
-
-		/* Add space for the grid dimensions */
-		buffer_size += kernel_parameters_offset_bytes * sizeof(uint);
-		shader->kernel_param = r600_compute_buffer_alloc_vram(
-						ctx->screen, buffer_size);
+		shader->kernel_param = (struct r600_resource *)
+			pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
+					PIPE_USAGE_IMMUTABLE, input_size);
 	}
 
-	num_work_groups_start = ctx->ws->buffer_map(
-		shader->kernel_param->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
+	u_box_1d(0, input_size, &box);
+	num_work_groups_start = ctx_->transfer_map(ctx_,
+			(struct pipe_resource*)shader->kernel_param,
+			0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
+			&box, &transfer);
 	global_size_start = num_work_groups_start + (3 * (sizeof(uint) /4));
 	local_size_start = global_size_start + (3 * (sizeof(uint)) / 4);
 	kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
@@ -230,20 +329,16 @@
 	/* Copy the kernel inputs */
 	memcpy(kernel_parameters_start, input, shader->input_size);
 
-	for (i = 0; i < (kernel_parameters_offset_bytes / 4) +
-					(shader->input_size / 4); i++) {
-		COMPUTE_DBG("input %i : %i\n", i,
+	for (i = 0; i < (input_size / 4); i++) {
+		COMPUTE_DBG(ctx->screen, "input %i : %u\n", i,
 			((unsigned*)num_work_groups_start)[i]);
 	}
 
-	ctx->ws->buffer_unmap(shader->kernel_param->cs_buf);
+	ctx_->transfer_unmap(ctx_, transfer);
 
-	///ID=0 is reserved for the parameters
-	evergreen_cs_set_vertex_buffer(ctx, 0, 0,
+	/* ID=0 is reserved for the parameters */
+	evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
 		(struct pipe_resource*)shader->kernel_param);
-	///ID=0 is reserved for parameters
-	evergreen_set_const_cache(shader, 0, shader->kernel_param,
-				shader->input_size, 0);
 }
 
 static void evergreen_emit_direct_dispatch(
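evergreen_compute_upload_input() above now reserves 9 dwords (36 bytes) at the front of the kernel-parameter buffer for implicit arguments before copying in the user-supplied inputs, and binds the result as constant buffer 0. A sketch of the resulting layout, following the pointer arithmetic in the function (the struct and field names are illustrative, not taken from the driver):

/* Hypothetical view of the buffer filled by evergreen_compute_upload_input().
 * Offsets mirror the three "+ 3 dwords" pointer steps in the function. */
#include <stdint.h>

struct cs_implicit_args {               /* name is illustrative only */
	uint32_t num_work_groups[3];    /* bytes  0..11: grid_layout[]    */
	uint32_t global_size[3];        /* bytes 12..23: global work size */
	uint32_t local_size[3];         /* bytes 24..35: block_layout[]   */
};                                      /* sizeof == 36 bytes == 9 dwords */
/* User kernel arguments follow at byte 36 and occupy shader->input_size
 * bytes, hence input_size = shader->input_size + 36 above. */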
@@ -251,14 +346,20 @@ static void evergreen_emit_direct_dispatch(
 	const uint *block_layout, const uint *grid_layout)
 {
 	int i;
-	struct radeon_winsys_cs *cs = rctx->cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
 	unsigned num_waves;
-	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+	unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
 	unsigned wave_divisor = (16 * num_pipes);
 	int group_size = 1;
 	int grid_size = 1;
-	/* XXX: Enable lds and get size from cs_shader_state */
-	unsigned lds_size = 0;
+	unsigned lds_size = shader->local_size / 4 +
+#if HAVE_LLVM < 0x0306
+		shader->active_kernel->bc.nlds_dw;
+#else
+		shader->bc.nlds_dw;
+#endif
+
 	/* Calculate group_size/grid_size */
 	for (i = 0; i < 3; i++) {
@@ -273,94 +374,109 @@ static void evergreen_emit_direct_dispatch(
 	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
 			wave_divisor - 1) / wave_divisor;
 
-	COMPUTE_DBG("Using %u pipes, there are %u wavefronts per thread block\n",
-							num_pipes, num_waves);
-
-	/* XXX: Partition the LDS between PS/CS.  By default half (4096 dwords
-	 * on Evergreen) oes to Pixel Shaders and half goes to Compute Shaders.
-	 * We may need to allocat the entire LDS space for Compute Shaders.
-	 *
-	 * EG: R_008E2C_SQ_LDS_RESOURCE_MGMT := S_008E2C_NUM_LS_LDS(lds_dwords)
-	 * CM: CM_R_0286FC_SPI_LDS_MGMT :=  S_0286FC_NUM_LS_LDS(lds_dwords)
-	 */
+	COMPUTE_DBG(rctx->screen, "Using %u pipes, "
+				"%u wavefronts per thread block, "
+				"allocating %u dwords lds.\n",
+				num_pipes, num_waves, lds_size);
 
-	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
+	radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
 
-	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
-	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
-	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
-	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+	radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+	radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+	radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+	radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
 
-	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+	radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
 								group_size);
 
-	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
-	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
-	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
-	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+	radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+	radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+	radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+	radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+
+	if (rctx->b.chip_class < CAYMAN) {
+		assert(lds_size <= 8192);
+	} else {
+		/* Cayman appears to have a slightly smaller limit, see the
+		 * value of CM_R_0286FC_SPI_LDS_MGMT.NUM_LS_LDS */
+		assert(lds_size <= 8160);
+	}
 
-	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+	radeon_compute_set_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
 					lds_size | (num_waves << 14));
 
 	/* Dispatch packet */
-	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-	r600_write_value(cs, grid_layout[0]);
-	r600_write_value(cs, grid_layout[1]);
-	r600_write_value(cs, grid_layout[2]);
+	radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+	radeon_emit(cs, grid_layout[0]);
+	radeon_emit(cs, grid_layout[1]);
+	radeon_emit(cs, grid_layout[2]);
 	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
-	r600_write_value(cs, 1);
+	radeon_emit(cs, 1);
 }
 
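evergreen_emit_direct_dispatch() above derives the SQ_LDS_ALLOC value from the LDS budget in dwords plus the number of wavefronts one thread block needs, assuming 16 threads per wavefront per pipe. A small worked sketch of the same arithmetic with the limits asserted in the hunk (example numbers only, not taken from a real dispatch):

/* Sketch of the SQ_LDS_ALLOC math used above; illustration, not driver code. */
#include <assert.h>
#include <stdint.h>

static uint32_t sq_lds_alloc(const unsigned block[3], unsigned num_pipes,
			     unsigned lds_dwords, int is_cayman)
{
	unsigned threads = block[0] * block[1] * block[2];
	unsigned wave_divisor = 16 * num_pipes;
	unsigned num_waves = (threads + wave_divisor - 1) / wave_divisor;

	assert(lds_dwords <= (is_cayman ? 8160u : 8192u));
	return lds_dwords | (num_waves << 14);
}
/* Example: block = {16, 16, 1} and num_pipes = 8 gives 256 threads,
 * wave_divisor = 128, so num_waves = 2. */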
 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 		const uint *grid_layout)
 {
-	struct radeon_winsys_cs *cs = ctx->cs;
-	unsigned flush_flags = 0;
-	int i;
+	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+	unsigned i;
 
-	struct r600_resource *onebo = NULL;
-	struct evergreen_compute_resource *resources =
-					ctx->cs_shader_state.shader->resources;
+	/* make sure that the gfx ring is only one active */
+	if (ctx->b.rings.dma.cs && ctx->b.rings.dma.cs->cdw) {
+		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+	}
 
 	/* Initialize all the compute-related registers.
 	 *
 	 * See evergreen_init_atom_start_compute_cs() in this file for the list
 	 * of registers initialized by the start_compute_cs_cmd atom.
 	 */
-	r600_emit_command_buffer(ctx->cs, &ctx->start_compute_cs_cmd);
+	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
 
-	ctx->flags |= R600_CONTEXT_CB_FLUSH;
+	ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
 	r600_flush_emit(ctx);
 
 	/* Emit colorbuffers. */
-	for (i = 0; i < ctx->framebuffer.state.nr_cbufs; i++) {
+	/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
+	for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
 		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
-		unsigned reloc = r600_context_bo_reloc(ctx, (struct r600_resource*)cb->base.texture,
-						       RADEON_USAGE_READWRITE);
-
-		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
-		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
-		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
-		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
-		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
-		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
-		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
-		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */
-
-		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
-		r600_write_value(cs, reloc);
+		unsigned reloc = radeon_add_to_buffer_list(&ctx->b, &ctx->b.rings.gfx,
+						       (struct r600_resource*)cb->base.texture,
+						       RADEON_USAGE_READWRITE,
+						       RADEON_PRIO_SHADER_RESOURCE_RW);
+
+		radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
+		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
+		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
+		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
+		radeon_emit(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
+		radeon_emit(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
+		radeon_emit(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
+		radeon_emit(cs, cb->cb_color_dim);	/* R_028C78_CB_COLOR0_DIM */
+
+		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
+		radeon_emit(cs, reloc);
 
 		if (!ctx->keep_tiling_flags) {
-			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
-			r600_write_value(cs, reloc);
+			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
+			radeon_emit(cs, reloc);
 		}
 
-		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
-		r600_write_value(cs, reloc);
+		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
+		radeon_emit(cs, reloc);
+	}
+	if (ctx->keep_tiling_flags) {
+		for (; i < 8 ; i++) {
+			radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+		}
+		for (; i < 12; i++) {
+			radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
+						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+		}
+	}
 
 	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
-	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
+	radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
 					ctx->compute_cb_target_mask);
 
@@ -368,76 +484,47 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
 	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
 
-	/* Emit compute shader state */
-	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
-
-	for (i = 0; i < get_compute_resource_num(); i++) {
-		if (resources[i].enabled) {
-			int j;
-			COMPUTE_DBG("resnum: %i, cdw: %i\n", i, cs->cdw);
+	/* Emit constant buffer state */
+	r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
 
-			for (j = 0; j < resources[i].cs_end; j++) {
-				if (resources[i].do_reloc[j]) {
-					assert(resources[i].bo);
-					evergreen_emit_ctx_reloc(ctx,
-						resources[i].bo,
-						resources[i].usage);
-				}
+	/* Emit sampler state */
+	r600_emit_atom(ctx, &ctx->samplers[PIPE_SHADER_COMPUTE].states.atom);
 
-				cs->buf[cs->cdw++] = resources[i].cs[j];
-			}
+	/* Emit sampler view (texture resource) state */
+	r600_emit_atom(ctx, &ctx->samplers[PIPE_SHADER_COMPUTE].views.atom);
 
-			if (resources[i].bo) {
-				onebo = resources[i].bo;
-				evergreen_emit_ctx_reloc(ctx,
-					resources[i].bo,
-					resources[i].usage);
-
-				///special case for textures
-				if (resources[i].do_reloc
-					[resources[i].cs_end] == 2) {
-					evergreen_emit_ctx_reloc(ctx,
-						resources[i].bo,
-						resources[i].usage);
-				}
-			}
-		}
-	}
+	/* Emit compute shader state */
+	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
 
 	/* Emit dispatch state and dispatch packet */
 	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
 
 	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
-	ctx->flags |= R600_CONTEXT_CB_FLUSH;
+	ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
+		      R600_CONTEXT_INV_VERTEX_CACHE |
	              R600_CONTEXT_INV_TEX_CACHE;
 	r600_flush_emit(ctx);
+	ctx->b.flags = 0;
+
+	if (ctx->b.chip_class >= CAYMAN) {
+		cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
+		cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4);
+		/* DEALLOC_STATE prevents the GPU from hanging when a
+		 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
+		 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
+		 */
+		cs->buf[cs->cdw++] = PKT3C(PKT3_DEALLOC_STATE, 0, 0);
+		cs->buf[cs->cdw++] = 0;
+	}
 
 #if 0
-	COMPUTE_DBG("cdw: %i\n", cs->cdw);
+	COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
 	for (i = 0; i < cs->cdw; i++) {
-		COMPUTE_DBG("%4i : 0x%08X\n", i, ctx->cs->buf[i]);
+		COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
 	}
 #endif
 
-	flush_flags = RADEON_FLUSH_ASYNC | RADEON_FLUSH_COMPUTE;
-	if (ctx->keep_tiling_flags) {
-		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
-	}
-
-	ctx->ws->cs_flush(ctx->cs, flush_flags);
-
-	ctx->pm4_dirty_cdwords = 0;
-	ctx->flags = 0;
-
-	COMPUTE_DBG("shader started\n");
-
-	ctx->ws->buffer_wait(onebo->buf, 0);
-
-	COMPUTE_DBG("...\n");
-
-	ctx->streamout_start = TRUE;
-	ctx->streamout_append_bitmask = ~0;
-
 }
 
@@ -451,24 +538,35 @@ void evergreen_emit_cs_shader(
 	struct r600_cs_shader_state *state =
 		(struct r600_cs_shader_state*)atom;
 	struct r600_pipe_compute *shader = state->shader;
-	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
-	struct radeon_winsys_cs *cs = rctx->cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
 	uint64_t va;
+	struct r600_resource *code_bo;
+	unsigned ngpr, nstack;
 
-	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
-
-	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
-	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
-	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
-			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
-			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
-	r600_write_value(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
-
-	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
-	r600_write_value(cs, r600_context_bo_reloc(rctx, kernel->code_bo,
-							RADEON_USAGE_READ));
+#if HAVE_LLVM < 0x0306
+	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
+	code_bo = kernel->code_bo;
+	va = kernel->code_bo->gpu_address;
+	ngpr = kernel->bc.ngpr;
+	nstack = kernel->bc.nstack;
+#else
+	code_bo = shader->code_bo;
+	va = shader->code_bo->gpu_address + state->pc;
+	ngpr = shader->bc.ngpr;
+	nstack = shader->bc.nstack;
+#endif
 
-	rctx->flags |= R600_CONTEXT_SHADERCONST_FLUSH;
+	radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
+	radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
+	radeon_emit(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
+			S_0288D4_NUM_GPRS(ngpr)
+			| S_0288D4_STACK_SIZE(nstack));
+	radeon_emit(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
+
+	radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
+	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx,
+					      code_bo, RADEON_USAGE_READ,
+					      RADEON_PRIO_SHADER_DATA));
 }
 
 static void evergreen_launch_grid(
@@ -477,25 +575,54 @@ static void evergreen_launch_grid(
 		uint32_t pc, const void *input)
 {
 	struct r600_context *ctx = (struct r600_context *)ctx_;
-
-#ifdef HAVE_OPENCL
-	COMPUTE_DBG("*** evergreen_launch_grid: pc = %u\n", pc);
-
+#ifdef HAVE_OPENCL
 	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
-	if (!shader->kernels[pc].code_bo) {
-		void *p;
-		struct r600_kernel *kernel = &shader->kernels[pc];
-		r600_compute_shader_create(ctx_, kernel->llvm_module, &kernel->bc);
-		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
-							kernel->bc.ndw * 4);
-		p = ctx->ws->buffer_map(kernel->code_bo->cs_buf, ctx->cs,
-							PIPE_TRANSFER_WRITE);
-		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
-		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
-	}
+	boolean use_kill;
+
+#if HAVE_LLVM < 0x0306
+	struct r600_kernel *kernel = &shader->kernels[pc];
+	(void)use_kill;
+	if (!kernel->code_bo) {
+		void *p;
+		struct r600_bytecode *bc = &kernel->bc;
+		LLVMModuleRef mod = kernel->llvm_module;
+		boolean use_kill = false;
+		bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
+		unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
+		unsigned sb_disasm = use_sb ||
+			(ctx->screen->b.debug_flags & DBG_SB_DISASM);
+
+		r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
+			   ctx->screen->has_compressed_msaa_texturing);
+		bc->type = TGSI_PROCESSOR_COMPUTE;
+		bc->isa = ctx->isa;
+		r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);
+
+		if (dump && !sb_disasm) {
+			r600_bytecode_disasm(bc);
+		} else if ((dump && sb_disasm) || use_sb) {
+			if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
+				R600_ERR("r600_sb_bytecode_process failed!\n");
+		}
+
+		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
+							kernel->bc.ndw * 4);
+		p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
+		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
+		ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
+	}
+	shader->active_kernel = kernel;
+	ctx->cs_shader_state.kernel_index = pc;
+#else
+	ctx->cs_shader_state.pc = pc;
+	/* Get the config information for this kernel. */
+	r600_shader_binary_read_config(&shader->binary, &shader->bc, pc, &use_kill);
+#endif
 #endif
 
-	ctx->cs_shader_state.kernel_index = pc;
+	COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
+
+	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
 	compute_emit_cs(ctx, block_layout, grid_layout);
 }
 
@@ -507,10 +634,10 @@ static void evergreen_set_compute_resources(struct pipe_context * ctx_,
 	struct r600_context *ctx = (struct r600_context *)ctx_;
 	struct r600_surface **resources = (struct r600_surface **)surfaces;
 
-	COMPUTE_DBG("*** evergreen_set_compute_resources: start = %u count = %u\n",
+	COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
 			start, count);
 
-	for (int i = 0; i < count; i++) {
+	for (unsigned i = 0; i < count; i++) {
 		/* The First two vertex buffers are reserved for parameters and
 		 * global buffers. */
 		unsigned vtx_id = 2 + i;
@@ -534,42 +661,6 @@ static void evergreen_set_compute_resources(struct pipe_context * ctx_,
 	}
 }
 
-static void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
-		unsigned start_slot, unsigned count,
-		struct pipe_sampler_view **views)
-{
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct r600_pipe_sampler_view **resource =
-		(struct r600_pipe_sampler_view **)views;
-
-	for (int i = 0; i < count; i++)	{
-		if (resource[i]) {
-			assert(i+1 < 12);
-			///FETCH0 = VTX0 (param buffer),
-			//FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
-			evergreen_set_tex_resource(ctx->cs_shader_state.shader, resource[i], i+2);
-		}
-	}
-}
-
-static void evergreen_bind_compute_sampler_states(
-	struct pipe_context *ctx_,
-	unsigned start_slot,
-	unsigned num_samplers,
-	void **samplers_)
-{
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct compute_sampler_state ** samplers =
-		(struct compute_sampler_state **)samplers_;
-
-	for (int i = 0; i < num_samplers; i++) {
-		if (samplers[i]) {
-			evergreen_set_sampler_resource(
-				ctx->cs_shader_state.shader, samplers[i], i);
-		}
-	}
-}
-
 static void evergreen_set_global_binding(
 	struct pipe_context *ctx_, unsigned first, unsigned n,
 	struct pipe_resource **resources,
@@ -579,8 +670,9 @@ static void evergreen_set_global_binding(
 	struct compute_memory_pool *pool = ctx->screen->global_pool;
 	struct r600_resource_global **buffers =
 		(struct r600_resource_global **)resources;
+	unsigned i;
 
-	COMPUTE_DBG("*** evergreen_set_global_binding first = %u n = %u\n",
+	COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
 			first, n);
 
 	if (!resources) {
@@ -588,14 +680,31 @@ static void evergreen_set_global_binding(
 		return;
 	}
 
-	compute_memory_finalize_pending(pool, ctx_);
+	/* We mark these items for promotion to the pool if they
+	 * aren't already there */
+	for (i = first; i < first + n; i++) {
+		struct compute_memory_item *item = buffers[i]->chunk;
+
+		if (!is_item_in_pool(item))
+			buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
+	}
 
-	for (int i = 0; i < n; i++)
+	if (compute_memory_finalize_pending(pool, ctx_) == -1) {
+		/* XXX: Unset */
+		return;
+	}
+
+	for (i = first; i < first + n; i++) {
+		uint32_t buffer_offset;
+		uint32_t handle;
 		assert(resources[i]->target == PIPE_BUFFER);
 		assert(resources[i]->bind & PIPE_BIND_GLOBAL);
 
-		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
+		buffer_offset = util_le32_to_cpu(*(handles[i]));
+		handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;
+
+		*(handles[i]) = util_cpu_to_le32(handle);
+	}
 
 	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
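With the change above, evergreen_set_global_binding() no longer returns a bare pool offset: it reads the byte offset the state tracker already stored in the handle, adds the chunk's position in the global memory pool (start_in_dw is counted in dwords, hence the * 4), and writes the sum back in little-endian order. A compressed sketch of that handle math (illustrative helper, not part of the patch):

/* Sketch of the per-buffer handle patching done above.  The real code wraps
 * the load/store in util_le32_to_cpu()/util_cpu_to_le32() so the handle is
 * little-endian regardless of the host CPU. */
#include <stdint.h>

static uint32_t patch_global_handle(uint32_t offset_in_buffer,
				    uint32_t chunk_start_in_dw)
{
	return offset_in_buffer + chunk_start_in_dw * 4;  /* dwords -> bytes */
}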
@@ -626,7 +735,16 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	r600_init_command_buffer(cb, 256);
 	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
 
-	switch (ctx->family) {
+	/* This must be first. */
+	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
+	r600_store_value(cb, 0x80000000);
+	r600_store_value(cb, 0x80000000);
+
+	/* We're setting config registers here. */
+	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
+	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+
+	switch (ctx->b.family) {
 	case CHIP_CEDAR:
 	default:
 		num_threads = 128;
@@ -672,14 +790,18 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	}
 
 	/* Config Registers */
-	evergreen_init_common_regs(cb, ctx->chip_class
-			, ctx->family, ctx->screen->info.drm_minor);
+	if (ctx->b.chip_class < CAYMAN)
+		evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+					   ctx->screen->b.info.drm_minor);
+	else
+		cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+					ctx->screen->b.info.drm_minor);
 
 	/* The primitive type always needs to be POINTLIST for compute. */
 	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
 						V_008958_DI_PT_POINTLIST);
 
-	if (ctx->chip_class < CAYMAN) {
+	if (ctx->b.chip_class < CAYMAN) {
 
 		/* These registers control which simds can be used by each stage.
 		 * The default for these registers is 0xffffffff, which means
@@ -723,10 +845,24 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 		r600_store_value(cb, S_008C28_NUM_LS_STACK_ENTRIES(num_stack_entries));
 	}
 
+	/* Give the compute shader all the available LDS space.
+	 * NOTE: This only sets the maximum number of dwords that a compute
+	 * shader can allocate.  When a shader is executed, we still need to
+	 * allocate the appropriate amount of LDS dwords using the
+	 * CM_R_0288E8_SQ_LDS_ALLOC register.
+	 */
+	if (ctx->b.chip_class < CAYMAN) {
+		r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
+			S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
+	} else {
+		r600_store_context_reg(cb, CM_R_0286FC_SPI_LDS_MGMT,
+			S_0286FC_NUM_PS_LDS(0) |
+			S_0286FC_NUM_LS_LDS(255)); /* 255 * 32 = 8160 dwords */
+	}
+
 	/* Context Registers */
 
-	if (ctx->chip_class < CAYMAN) {
+	if (ctx->b.chip_class < CAYMAN) {
 		/* workaround for hw issues with dyn gpr - must set all limits
 		 * to 240 instead of 0, 0x1e == 240 / 8
 		 */
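The hunk above statically hands the whole local-data-share partition to the LS/compute stage: 8192 dwords on Evergreen, and a field value of 255 on Cayman, which the in-tree comment decodes as 32-dword granules (255 * 32 = 8160 dwords, matching the per-dispatch limit asserted in the dispatch path). A tiny sketch of that unit conversion (illustrative only):

/* Per the patch comment, Cayman's SPI_LDS_MGMT.NUM_LS_LDS counts LDS in
 * 32-dword granules, while Evergreen's SQ_LDS_RESOURCE_MGMT takes dwords. */
static unsigned cayman_ls_lds_dwords(unsigned num_ls_lds_field)
{
	return num_ls_lds_field * 32;   /* 255 -> 8160 dwords (32640 bytes) */
}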
@@ -770,39 +906,36 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
 void evergreen_init_compute_state_functions(struct r600_context *ctx)
 {
-	ctx->context.create_compute_state = evergreen_create_compute_state;
-	ctx->context.delete_compute_state = evergreen_delete_compute_state;
-	ctx->context.bind_compute_state = evergreen_bind_compute_state;
+	ctx->b.b.create_compute_state = evergreen_create_compute_state;
+	ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
+	ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
 //	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
-	ctx->context.set_compute_resources = evergreen_set_compute_resources;
-	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
-	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
-	ctx->context.set_global_binding = evergreen_set_global_binding;
-	ctx->context.launch_grid = evergreen_launch_grid;
-
-	/* We always use at least two vertex buffers for compute, one for
-         * parameters and one for global memory */
-	ctx->cs_vertex_buffer_state.enabled_mask =
-	ctx->cs_vertex_buffer_state.dirty_mask = 1 | 2;
-}
+	ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
+	ctx->b.b.set_global_binding = evergreen_set_global_binding;
+	ctx->b.b.launch_grid = evergreen_launch_grid;
+}
 
 struct pipe_resource *r600_compute_global_buffer_create(
 	struct pipe_screen *screen,
 	const struct pipe_resource *templ)
 {
+	struct r600_resource_global* result = NULL;
+	struct r600_screen* rscreen = NULL;
+	int size_in_dw = 0;
+
 	assert(templ->target == PIPE_BUFFER);
 	assert(templ->bind & PIPE_BIND_GLOBAL);
 	assert(templ->array_size == 1 || templ->array_size == 0);
 	assert(templ->depth0 == 1 || templ->depth0 == 0);
 	assert(templ->height0 == 1 || templ->height0 == 0);
 
-	struct r600_resource_global* result = (struct r600_resource_global*)
-		CALLOC(sizeof(struct r600_resource_global), 1);
-	struct r600_screen* rscreen = (struct r600_screen*)screen;
+	result = (struct r600_resource_global*)
+	CALLOC(sizeof(struct r600_resource_global), 1);
+	rscreen = (struct r600_screen*)screen;
 
-	COMPUTE_DBG("*** r600_compute_global_buffer_create\n");
-	COMPUTE_DBG("width = %u array_size = %u\n", templ->width0,
+	COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+	COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
 			templ->array_size);
 
 	result->base.b.vtbl = &r600_global_buffer_vtbl;
@@ -810,7 +943,7 @@ struct pipe_resource *r600_compute_global_buffer_create(
 	result->base.b.b = *templ;
 	pipe_reference_init(&result->base.b.b.reference, 1);
 
-	int size_in_dw = (templ->width0+3) / 4;
+	size_in_dw = (templ->width0+3) / 4;
 
 	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
 
@@ -827,11 +960,14 @@ void r600_compute_global_buffer_destroy(
 	struct pipe_screen *screen,
 	struct pipe_resource *res)
 {
+	struct r600_resource_global* buffer = NULL;
+	struct r600_screen* rscreen = NULL;
+
 	assert(res->target == PIPE_BUFFER);
 	assert(res->bind & PIPE_BIND_GLOBAL);
 
-	struct r600_resource_global* buffer = (struct r600_resource_global*)res;
-	struct r600_screen* rscreen = (struct r600_screen*)screen;
+	buffer = (struct r600_resource_global*)res;
+	rscreen = (struct r600_screen*)screen;
 
 	compute_memory_free(rscreen->global_pool, buffer->chunk->id);
 
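r600_compute_global_buffer_create() above sizes the allocation in dwords, rounding the requested byte width up, and backs the resource with a chunk of the screen-wide compute memory pool rather than a dedicated buffer object; the transfer-map path that follows operates on that pool chunk. A one-line sketch of the rounding (example value only):

/* Global buffers are carved out of the compute memory pool in dwords,
 * rounded up from the byte size, exactly as in the hunk above. */
static int global_buffer_size_in_dw(unsigned width0_bytes)
{
	return (width0_bytes + 3) / 4;  /* e.g. width0 = 10 -> 3 dwords */
}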
@@ -849,66 +985,63 @@ void *r600_compute_global_transfer_map(
 {
 	struct r600_context *rctx = (struct r600_context*)ctx_;
 	struct compute_memory_pool *pool = rctx->screen->global_pool;
-	struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
 	struct r600_resource_global* buffer =
 		(struct r600_resource_global*)resource;
-	uint32_t* map;
 
-	compute_memory_finalize_pending(pool, ctx_);
+	struct compute_memory_item *item = buffer->chunk;
+	struct pipe_resource *dst = NULL;
+	unsigned offset = box->x;
 
-	assert(resource->target == PIPE_BUFFER);
+	if (is_item_in_pool(item)) {
+		compute_memory_demote_item(pool, item, ctx_);
+	}
+	else {
+		if (item->real_buffer == NULL) {
+			item->real_buffer = (struct r600_resource*)
+					r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
+		}
+	}
+
+	dst = (struct pipe_resource*)item->real_buffer;
 
-	COMPUTE_DBG("* r600_compute_global_get_transfer()\n"
+	if (usage & PIPE_TRANSFER_READ)
+		buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
+
+	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
 			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
 			"width = %u, height = %u, depth = %u)\n", level, usage,
 			box->x, box->y, box->z, box->width, box->height,
 			box->depth);
+	COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
+		"%u (box.x)\n", item->id, box->x);
 
-	transfer->resource = resource;
-	transfer->level = level;
-	transfer->usage = usage;
-	transfer->box = *box;
-	transfer->stride = 0;
-	transfer->layer_stride = 0;
 
-	assert(transfer->resource->target == PIPE_BUFFER);
-	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
-	assert(transfer->box.x >= 0);
-	assert(transfer->box.y == 0);
-	assert(transfer->box.z == 0);
+	assert(resource->target == PIPE_BUFFER);
+	assert(resource->bind & PIPE_BIND_GLOBAL);
+	assert(box->x >= 0);
+	assert(box->y == 0);
+	assert(box->z == 0);
 
 	///TODO: do it better, mapping is not possible if the pool is too big
-
-	COMPUTE_DBG("* r600_compute_global_transfer_map()\n");
-
-	if (!(map = rctx->ws->buffer_map(buffer->chunk->pool->bo->cs_buf,
-						rctx->cs, transfer->usage))) {
-		util_slab_free(&rctx->pool_transfers, transfer);
-		return NULL;
-	}
-
-	*ptransfer = transfer;
-
-	COMPUTE_DBG("Buffer: %p + %u (buffer offset in global memory) "
-		"+ %u (box.x)\n", map, buffer->chunk->start_in_dw, transfer->box.x);
-	return ((char*)(map + buffer->chunk->start_in_dw)) + transfer->box.x;
+	return pipe_buffer_map_range(ctx_, dst,
+			offset, box->width, usage, ptransfer);
 }
 
 void r600_compute_global_transfer_unmap(
 	struct pipe_context *ctx_,
 	struct pipe_transfer* transfer)
 {
-	assert(transfer->resource->target == PIPE_BUFFER);
-	assert(transfer->resource->bind & PIPE_BIND_GLOBAL);
-
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct r600_resource_global* buffer =
-		(struct r600_resource_global*)transfer->resource;
-
-	COMPUTE_DBG("* r600_compute_global_transfer_unmap()\n");
-
-	ctx->ws->buffer_unmap(buffer->chunk->pool->bo->cs_buf);
-	util_slab_free(&ctx->pool_transfers, transfer);
+	/* struct r600_resource_global are not real resources, they just map
+	 * to an offset within the compute memory pool.  The function
+	 * r600_compute_global_transfer_map() maps the memory pool
+	 * resource rather than the struct r600_resource_global passed to
+	 * it as an argument and then initalizes ptransfer->resource with
+	 * the memory pool resource (via pipe_buffer_map_range).
	 * When transfer_unmap is called it uses the memory pool's
	 * vtable which calls r600_buffer_transfer_map() rather than
	 * this function.
+	 */
+	assert (!"This function should not be called");
 }
 
 void r600_compute_global_transfer_flush_region(