diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index dc17c058e2c..45fba007787 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -24,22 +24,26 @@
  * Adam Rak
  */
 
+#ifdef HAVE_OPENCL
+#include <gelf.h>
+#include <libelf.h>
+#endif
 #include <stdio.h>
 #include <errno.h>
 #include "pipe/p_defines.h"
 #include "pipe/p_state.h"
 #include "pipe/p_context.h"
 #include "util/u_blitter.h"
-#include "util/u_double_list.h"
+#include "util/list.h"
 #include "util/u_transfer.h"
 #include "util/u_surface.h"
 #include "util/u_pack_color.h"
 #include "util/u_memory.h"
 #include "util/u_inlines.h"
 #include "util/u_framebuffer.h"
+#include "tgsi/tgsi_parse.h"
 #include "pipebuffer/pb_buffer.h"
 #include "evergreend.h"
-#include "r600_resource.h"
 #include "r600_shader.h"
 #include "r600_pipe.h"
 #include "r600_formats.h"
@@ -47,9 +51,7 @@
 #include "evergreen_compute_internal.h"
 #include "compute_memory_pool.h"
 #include "sb/sb_public.h"
-#ifdef HAVE_OPENCL
-#include "radeon_llvm_util.h"
-#endif
+#include <inttypes.h>
 
 /**
 RAT0 is for global binding write
@@ -82,29 +84,24 @@ writable images will consume TEX slots, VTX slots
 too because of linear indexing
 
 */
 
-struct r600_resource* r600_compute_buffer_alloc_vram(
-	struct r600_screen *screen,
-	unsigned size)
+struct r600_resource *r600_compute_buffer_alloc_vram(struct r600_screen *screen,
+						     unsigned size)
 {
-	struct pipe_resource * buffer = NULL;
+	struct pipe_resource *buffer = NULL;
 	assert(size);
 
-	buffer = pipe_buffer_create(
-		(struct pipe_screen*) screen,
-		PIPE_BIND_CUSTOM,
-		PIPE_USAGE_IMMUTABLE,
-		size);
+	buffer = pipe_buffer_create((struct pipe_screen*) screen,
+				    0, PIPE_USAGE_IMMUTABLE, size);
 
 	return (struct r600_resource *)buffer;
 }
 
-static void evergreen_set_rat(
-	struct r600_pipe_compute *pipe,
-	int id,
-	struct r600_resource* bo,
-	int start,
-	int size)
+static void evergreen_set_rat(struct r600_pipe_compute *pipe,
+			      unsigned id,
+			      struct r600_resource *bo,
+			      int start,
+			      int size)
 {
 	struct pipe_surface rat_templ;
 	struct r600_surface *surf = NULL;
@@ -144,33 +141,31 @@ static void evergreen_set_rat(
 	evergreen_init_color_surface_rat(rctx, surf);
 }
 
-static void evergreen_cs_set_vertex_buffer(
-	struct r600_context * rctx,
-	unsigned vb_index,
-	unsigned offset,
-	struct pipe_resource * buffer)
+static void evergreen_cs_set_vertex_buffer(struct r600_context *rctx,
+					   unsigned vb_index,
+					   unsigned offset,
+					   struct pipe_resource *buffer)
 {
 	struct r600_vertexbuf_state *state = &rctx->cs_vertex_buffer_state;
 	struct pipe_vertex_buffer *vb = &state->vb[vb_index];
 	vb->stride = 1;
 	vb->buffer_offset = offset;
-	vb->buffer = buffer;
-	vb->user_buffer = NULL;
+	vb->buffer.resource = buffer;
+	vb->is_user_buffer = false;
 
 	/* The vertex instructions in the compute shaders use the texture cache,
 	 * so we need to invalidate it. */
 	rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
 	state->enabled_mask |= 1 << vb_index;
 	state->dirty_mask |= 1 << vb_index;
-	state->atom.dirty = true;
+	r600_mark_atom_dirty(rctx, &state->atom);
 }
 
-static void evergreen_cs_set_constant_buffer(
-	struct r600_context * rctx,
-	unsigned cb_index,
-	unsigned offset,
-	unsigned size,
-	struct pipe_resource * buffer)
+static void evergreen_cs_set_constant_buffer(struct r600_context *rctx,
+					     unsigned cb_index,
+					     unsigned offset,
+					     unsigned size,
+					     struct pipe_resource *buffer)
 {
 	struct pipe_constant_buffer cb;
 	cb.buffer_size = size;
@@ -181,67 +176,318 @@ static void evergreen_cs_set_constant_buffer(
 	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
 }
 
-static const struct u_resource_vtbl r600_global_buffer_vtbl =
+/* We need to define these R600 registers here, because we can't include
+ * evergreend.h and r600d.h.
+ */
+#define R_028868_SQ_PGM_RESOURCES_VS 0x028868
+#define R_028850_SQ_PGM_RESOURCES_PS 0x028850
+
+#ifdef HAVE_OPENCL
+static void parse_symbol_table(Elf_Data *symbol_table_data,
+			       const GElf_Shdr *symbol_table_header,
+			       struct ac_shader_binary *binary)
 {
-	u_default_resource_get_handle, /* get_handle */
-	r600_compute_global_buffer_destroy, /* resource_destroy */
-	r600_compute_global_transfer_map, /* transfer_map */
-	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
-	r600_compute_global_transfer_unmap, /* transfer_unmap */
-	r600_compute_global_transfer_inline_write /* transfer_inline_write */
-};
+	GElf_Sym symbol;
+	unsigned i = 0;
+	unsigned symbol_count =
+		symbol_table_header->sh_size / symbol_table_header->sh_entsize;
+
+	/* We are over allocating this list, because symbol_count gives the
+	 * total number of symbols, and we will only be filling the list
+	 * with offsets of global symbols. The memory savings from
+	 * allocating the correct size of this list will be small, and
+	 * I don't think it is worth the cost of pre-computing the number
+	 * of global symbols.
+	 */
+	binary->global_symbol_offsets = CALLOC(symbol_count, sizeof(uint64_t));
+	while (gelf_getsym(symbol_table_data, i++, &symbol)) {
+		unsigned i;
+		if (GELF_ST_BIND(symbol.st_info) != STB_GLOBAL ||
+		    symbol.st_shndx == 0 /* Undefined symbol */) {
+			continue;
+		}
+
+		binary->global_symbol_offsets[binary->global_symbol_count] =
+			symbol.st_value;
-void *evergreen_create_compute_state(
-	struct pipe_context *ctx_,
-	const const struct pipe_compute_state *cso)
+		/* Sort the list using bubble sort. This list will usually
+		 * be small. */
+		for (i = binary->global_symbol_count; i > 0; --i) {
+			uint64_t lhs = binary->global_symbol_offsets[i - 1];
+			uint64_t rhs = binary->global_symbol_offsets[i];
+			if (lhs < rhs) {
+				break;
+			}
+			binary->global_symbol_offsets[i] = lhs;
+			binary->global_symbol_offsets[i - 1] = rhs;
+		}
+		++binary->global_symbol_count;
+	}
+}
+
+
+static void parse_relocs(Elf *elf, Elf_Data *relocs, Elf_Data *symbols,
+			 unsigned symbol_sh_link,
+			 struct ac_shader_binary *binary)
 {
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
+	unsigned i;
-#ifdef HAVE_OPENCL
-	const struct pipe_llvm_program_header * header;
-	const unsigned char * code;
+	if (!relocs || !symbols || !binary->reloc_count) {
+		return;
+	}
+	binary->relocs = CALLOC(binary->reloc_count,
+				sizeof(struct ac_shader_reloc));
+	for (i = 0; i < binary->reloc_count; i++) {
+		GElf_Sym symbol;
+		GElf_Rel rel;
+		char *symbol_name;
+		struct ac_shader_reloc *reloc = &binary->relocs[i];
+
+		gelf_getrel(relocs, i, &rel);
+		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &symbol);
+		symbol_name = elf_strptr(elf, symbol_sh_link, symbol.st_name);
+
+		reloc->offset = rel.r_offset;
+		strncpy(reloc->name, symbol_name, sizeof(reloc->name)-1);
+		reloc->name[sizeof(reloc->name)-1] = 0;
+	}
+}
+
+static void r600_elf_read(const char *elf_data, unsigned elf_size,
+			  struct ac_shader_binary *binary)
+{
+	char *elf_buffer;
+	Elf *elf;
+	Elf_Scn *section = NULL;
+	Elf_Data *symbols = NULL, *relocs = NULL;
+	size_t section_str_index;
+	unsigned symbol_sh_link = 0;
+
+	/* One of the libelf implementations
+	 * (http://www.mr511.de/software/english.htm) requires calling
+	 * elf_version() before elf_memory().
+	 */
+	elf_version(EV_CURRENT);
+	elf_buffer = MALLOC(elf_size);
+	memcpy(elf_buffer, elf_data, elf_size);
+
+	elf = elf_memory(elf_buffer, elf_size);
+
+	elf_getshdrstrndx(elf, &section_str_index);
+
+	while ((section = elf_nextscn(elf, section))) {
+		const char *name;
+		Elf_Data *section_data = NULL;
+		GElf_Shdr section_header;
+		if (gelf_getshdr(section, &section_header) != &section_header) {
+			fprintf(stderr, "Failed to read ELF section header\n");
+			return;
+		}
+		name = elf_strptr(elf, section_str_index, section_header.sh_name);
+		if (!strcmp(name, ".text")) {
+			section_data = elf_getdata(section, section_data);
+			binary->code_size = section_data->d_size;
+			binary->code = MALLOC(binary->code_size * sizeof(unsigned char));
+			memcpy(binary->code, section_data->d_buf, binary->code_size);
+		} else if (!strcmp(name, ".AMDGPU.config")) {
+			section_data = elf_getdata(section, section_data);
+			binary->config_size = section_data->d_size;
+			binary->config = MALLOC(binary->config_size * sizeof(unsigned char));
+			memcpy(binary->config, section_data->d_buf, binary->config_size);
+		} else if (!strcmp(name, ".AMDGPU.disasm")) {
+			/* Always read disassembly if it's available. */
+			section_data = elf_getdata(section, section_data);
+			binary->disasm_string = strndup(section_data->d_buf,
+							section_data->d_size);
+		} else if (!strncmp(name, ".rodata", 7)) {
+			section_data = elf_getdata(section, section_data);
+			binary->rodata_size = section_data->d_size;
+			binary->rodata = MALLOC(binary->rodata_size * sizeof(unsigned char));
+			memcpy(binary->rodata, section_data->d_buf, binary->rodata_size);
+		} else if (!strncmp(name, ".symtab", 7)) {
+			symbols = elf_getdata(section, section_data);
+			symbol_sh_link = section_header.sh_link;
+			parse_symbol_table(symbols, &section_header, binary);
+		} else if (!strcmp(name, ".rel.text")) {
+			relocs = elf_getdata(section, section_data);
+			binary->reloc_count = section_header.sh_size /
+					section_header.sh_entsize;
+		}
+	}
+
+	parse_relocs(elf, relocs, symbols, symbol_sh_link, binary);
+
+	if (elf){
+		elf_end(elf);
+	}
+	FREE(elf_buffer);
+
+	/* Cache the config size per symbol */
+	if (binary->global_symbol_count) {
+		binary->config_size_per_symbol =
+			binary->config_size / binary->global_symbol_count;
+	} else {
+		binary->global_symbol_count = 1;
+		binary->config_size_per_symbol = binary->config_size;
+	}
+}
+
+static const unsigned char *r600_shader_binary_config_start(
+	const struct ac_shader_binary *binary,
+	uint64_t symbol_offset)
 {
 	unsigned i;
+	for (i = 0; i < binary->global_symbol_count; ++i) {
+		if (binary->global_symbol_offsets[i] == symbol_offset) {
+			unsigned offset = i * binary->config_size_per_symbol;
+			return binary->config + offset;
+		}
+	}
+	return binary->config;
 }
 
-	COMPUTE_DBG(ctx->screen, "*** evergreen_create_compute_state\n");
+static void r600_shader_binary_read_config(const struct ac_shader_binary *binary,
+					   struct r600_bytecode *bc,
+					   uint64_t symbol_offset,
+					   boolean *use_kill)
 {
 	unsigned i;
+	const unsigned char *config =
+		r600_shader_binary_config_start(binary, symbol_offset);
+
+	for (i = 0; i < binary->config_size_per_symbol; i+= 8) {
+		unsigned reg =
+			util_le32_to_cpu(*(uint32_t*)(config + i));
+		unsigned value =
+			util_le32_to_cpu(*(uint32_t*)(config + i + 4));
+		switch (reg) {
+		/* R600 / R700 */
+		case R_028850_SQ_PGM_RESOURCES_PS:
+		case R_028868_SQ_PGM_RESOURCES_VS:
+		/* Evergreen / Northern Islands */
+		case R_028844_SQ_PGM_RESOURCES_PS:
+		case R_028860_SQ_PGM_RESOURCES_VS:
+		case R_0288D4_SQ_PGM_RESOURCES_LS:
+			bc->ngpr = MAX2(bc->ngpr, G_028844_NUM_GPRS(value));
+			bc->nstack = MAX2(bc->nstack, G_028844_STACK_SIZE(value));
+			break;
+		case R_02880C_DB_SHADER_CONTROL:
+			*use_kill = G_02880C_KILL_ENABLE(value);
+			break;
+		case R_0288E8_SQ_LDS_ALLOC:
+			bc->nlds_dw = value;
+			break;
+		}
+	}
 }
 
-	header = cso->prog;
-	code = cso->prog + sizeof(struct pipe_llvm_program_header);
+static unsigned r600_create_shader(struct r600_bytecode *bc,
+				   const struct ac_shader_binary *binary,
+				   boolean *use_kill)
+
+{
+	assert(binary->code_size % 4 == 0);
+	bc->bytecode = CALLOC(1, binary->code_size);
+	memcpy(bc->bytecode, binary->code, binary->code_size);
+	bc->ndw = binary->code_size / 4;
+
+	r600_shader_binary_read_config(binary, bc, 0, use_kill);
+	return 0;
+}
+
+#endif
+
+static void r600_destroy_shader(struct r600_bytecode *bc)
+{
+	FREE(bc->bytecode);
+}
+
+static void *evergreen_create_compute_state(struct pipe_context *ctx,
+					    const struct pipe_compute_state *cso)
+{
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct r600_pipe_compute *shader = CALLOC_STRUCT(r600_pipe_compute);
+#ifdef HAVE_OPENCL
+	const struct pipe_llvm_program_header *header;
+	const char *code;
+	void *p;
+	boolean use_kill;
 #endif
 
-	shader->ctx = (struct r600_context*)ctx;
+	shader->ctx = rctx;
 	shader->local_size = cso->req_local_mem;
 	shader->private_size = cso->req_private_mem;
 	shader->input_size = cso->req_input_mem;
-
-#ifdef HAVE_OPENCL
-	shader->num_kernels = radeon_llvm_get_num_kernels(code, header->num_bytes);
-	shader->kernels = CALLOC(sizeof(struct r600_kernel), shader->num_kernels);
+	shader->ir_type = cso->ir_type;
 
-	for (i = 0; i < shader->num_kernels; i++) {
-		struct r600_kernel *kernel = &shader->kernels[i];
-		kernel->llvm_module = radeon_llvm_get_kernel_module(i, code,
-							header->num_bytes);
+	if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+		shader->sel = r600_create_shader_state_tokens(ctx, cso->prog, PIPE_SHADER_COMPUTE);
+		return shader;
 	}
+#ifdef HAVE_OPENCL
+	COMPUTE_DBG(rctx->screen, "*** evergreen_create_compute_state\n");
+	header = cso->prog;
+	code = cso->prog + sizeof(struct pipe_llvm_program_header);
+	radeon_shader_binary_init(&shader->binary);
+	r600_elf_read(code, header->num_bytes, &shader->binary);
+	r600_create_shader(&shader->bc, &shader->binary, &use_kill);
+
+	/* Upload code + ROdata */
+	shader->code_bo = r600_compute_buffer_alloc_vram(rctx->screen,
+							shader->bc.ndw * 4);
+	p = r600_buffer_map_sync_with_rings(&rctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
+	//TODO: use util_memcpy_cpu_to_le32 ?
+	memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
+	rctx->b.ws->buffer_unmap(shader->code_bo->buf);
 #endif
 
+	return shader;
 }
 
-void evergreen_delete_compute_state(struct pipe_context *ctx, void* state)
+static void evergreen_delete_compute_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_pipe_compute *shader = (struct r600_pipe_compute *)state;
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct r600_pipe_compute *shader = state;
 
-	free(shader);
+	COMPUTE_DBG(rctx->screen, "*** evergreen_delete_compute_state\n");
+
+	if (!shader)
+		return;
+
+	if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+		r600_delete_shader_selector(ctx, shader->sel);
+	} else {
+#ifdef HAVE_OPENCL
+		radeon_shader_binary_clean(&shader->binary);
+#endif
+		r600_destroy_shader(&shader->bc);
+
+		/* TODO destroy shader->code_bo, shader->const_bo
+		 * we'll need something like r600_buffer_free */
+	}
+	FREE(shader);
 }
 
-static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
+static void evergreen_bind_compute_state(struct pipe_context *ctx, void *state)
 {
-	struct r600_context *ctx = (struct r600_context *)ctx_;
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct r600_pipe_compute *cstate = (struct r600_pipe_compute *)state;
+	COMPUTE_DBG(rctx->screen, "*** evergreen_bind_compute_state\n");
 
-	COMPUTE_DBG(ctx->screen, "*** evergreen_bind_compute_state\n");
+	if (!state) {
+		rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
+		return;
+	}
 
-	ctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
+	if (cstate->ir_type == PIPE_SHADER_IR_TGSI) {
+		bool compute_dirty;
+
+		r600_shader_select(ctx, cstate->sel, &compute_dirty);
+	}
+
+	rctx->cs_shader_state.shader = (struct r600_pipe_compute *)state;
 }
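
The ELF path added above follows the standard libelf idiom: elf_version() first, then elf_memory() on a private copy of the binary, elf_getshdrstrndx() for the section-name string table, and an elf_nextscn()/gelf_getshdr()/elf_strptr() loop that dispatches on section names. For readers new to that API, here is a minimal self-contained sketch of the same walk over a file on disk; the input path and the build line (cc walk.c -lelf) are illustrative assumptions, not part of the patch.

#include <fcntl.h>
#include <gelf.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "a.out", O_RDONLY);
	Elf *elf;
	Elf_Scn *section = NULL;
	size_t shstrndx;

	if (fd < 0 || elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	/* r600_elf_read() uses elf_memory() on a MALLOC'd copy instead */
	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (!elf || elf_getshdrstrndx(elf, &shstrndx) != 0)
		return 1;

	while ((section = elf_nextscn(elf, section))) {
		GElf_Shdr header;
		if (gelf_getshdr(section, &header) != &header)
			break;
		/* r600_elf_read() dispatches on .text, .AMDGPU.config,
		 * .AMDGPU.disasm, .rodata, .symtab and .rel.text here */
		printf("%-20s %llu bytes\n",
		       elf_strptr(elf, shstrndx, header.sh_name),
		       (unsigned long long)header.sh_size);
	}

	elf_end(elf);
	close(fd);
	return 0;
}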
 
 /* The kernel parameters are stored a vtx buffer (ID=0), besides the explicit
@@ -255,39 +501,38 @@ static void evergreen_bind_compute_state(struct pipe_context *ctx_, void *state)
  * (x,y,z)
  * DWORDS 9+ : Kernel parameters
  */
-void evergreen_compute_upload_input(
-	struct pipe_context *ctx_,
-	const uint *block_layout,
-	const uint *grid_layout,
-	const void *input)
+static void evergreen_compute_upload_input(struct pipe_context *ctx,
+					   const struct pipe_grid_info *info)
 {
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
-	int i;
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+	unsigned i;
 
 	/* We need to reserve 9 dwords (36 bytes) for implicit kernel
 	 * parameters.
 	 */
-	unsigned input_size = shader->input_size + 36;
-	uint32_t * num_work_groups_start;
-	uint32_t * global_size_start;
-	uint32_t * local_size_start;
-	uint32_t * kernel_parameters_start;
+	unsigned input_size;
+	uint32_t *num_work_groups_start;
+	uint32_t *global_size_start;
+	uint32_t *local_size_start;
+	uint32_t *kernel_parameters_start;
 	struct pipe_box box;
 	struct pipe_transfer *transfer = NULL;
 
+	if (!shader)
+		return;
+
 	if (shader->input_size == 0) {
 		return;
 	}
-
+	input_size = shader->input_size + 36;
 	if (!shader->kernel_param) {
 		/* Add space for the grid dimensions */
 		shader->kernel_param = (struct r600_resource *)
-			pipe_buffer_create(ctx_->screen, PIPE_BIND_CUSTOM,
+			pipe_buffer_create(ctx->screen, 0,
 					PIPE_USAGE_IMMUTABLE, input_size);
 	}
 
 	u_box_1d(0, input_size, &box);
-	num_work_groups_start = ctx_->transfer_map(ctx_,
+	num_work_groups_start = ctx->transfer_map(ctx,
 			(struct pipe_resource*)shader->kernel_param,
 			0, PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE,
 			&box, &transfer);
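
The next hunk fills the 36-byte implicit parameter block that the comment above evergreen_compute_upload_input() describes, then appends the user's kernel arguments after it. As a standalone illustration of the resulting buffer layout (struct name and sample sizes are hypothetical; the offsets follow the DWORD map in that comment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct implicit_params {		/* hypothetical name */
	uint32_t num_work_groups[3];	/* DWORDS 0-2: info->grid */
	uint32_t global_size[3];	/* DWORDS 3-5: grid[i] * block[i] */
	uint32_t local_size[3];		/* DWORDS 6-8: info->block */
};					/* kernel arguments start at DWORD 9 */

int main(void)
{
	const uint32_t grid[3] = {4, 2, 1}, block[3] = {8, 8, 1};
	struct implicit_params p;
	unsigned i;

	memcpy(p.num_work_groups, grid, sizeof(grid));
	for (i = 0; i < 3; i++)
		p.global_size[i] = grid[i] * block[i];
	memcpy(p.local_size, block, sizeof(block));

	for (i = 0; i < 9; i++)
		printf("dword %u = %u\n", i, ((uint32_t *)&p)[i]);
	return 0;
}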
@@ -296,56 +541,62 @@ void evergreen_compute_upload_input(
 	kernel_parameters_start = local_size_start + (3 * (sizeof(uint)) / 4);
 
 	/* Copy the work group size */
-	memcpy(num_work_groups_start, grid_layout, 3 * sizeof(uint));
+	memcpy(num_work_groups_start, info->grid, 3 * sizeof(uint));
 
 	/* Copy the global size */
 	for (i = 0; i < 3; i++) {
-		global_size_start[i] = grid_layout[i] * block_layout[i];
+		global_size_start[i] = info->grid[i] * info->block[i];
 	}
 
 	/* Copy the local dimensions */
-	memcpy(local_size_start, block_layout, 3 * sizeof(uint));
+	memcpy(local_size_start, info->block, 3 * sizeof(uint));
 
 	/* Copy the kernel inputs */
-	memcpy(kernel_parameters_start, input, shader->input_size);
+	memcpy(kernel_parameters_start, info->input, shader->input_size);
 
 	for (i = 0; i < (input_size / 4); i++) {
-		COMPUTE_DBG(ctx->screen, "input %i : %i\n", i,
+		COMPUTE_DBG(rctx->screen, "input %i : %u\n", i,
 			((unsigned*)num_work_groups_start)[i]);
 	}
 
-	ctx_->transfer_unmap(ctx_, transfer);
+	ctx->transfer_unmap(ctx, transfer);
 
-	/* ID=0 is reserved for the parameters */
-	evergreen_cs_set_constant_buffer(ctx, 0, 0, input_size,
+	/* ID=0 and ID=3 are reserved for the parameters.
+	 * LLVM will preferably use ID=0, but it does not work for dynamic
+	 * indices. */
+	evergreen_cs_set_vertex_buffer(rctx, 3, 0,
+			(struct pipe_resource*)shader->kernel_param);
+	evergreen_cs_set_constant_buffer(rctx, 0, 0, input_size,
 			(struct pipe_resource*)shader->kernel_param);
 }
 
-static void evergreen_emit_direct_dispatch(
-	struct r600_context *rctx,
-	const uint *block_layout, const uint *grid_layout)
+static void evergreen_emit_dispatch(struct r600_context *rctx,
+				    const struct pipe_grid_info *info)
 {
 	int i;
-	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
 	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
 	unsigned num_waves;
-	unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
+	unsigned num_pipes = rctx->screen->b.info.r600_max_quad_pipes;
 	unsigned wave_divisor = (16 * num_pipes);
 	int group_size = 1;
 	int grid_size = 1;
-	unsigned lds_size = shader->local_size / 4 + shader->active_kernel->bc.nlds_dw;
+	unsigned lds_size = shader->local_size / 4;
+
+	if (shader->ir_type != PIPE_SHADER_IR_TGSI)
+		lds_size += shader->bc.nlds_dw;
 
 	/* Calculate group_size/grid_size */
 	for (i = 0; i < 3; i++) {
-		group_size *= block_layout[i];
+		group_size *= info->block[i];
 	}
 
 	for (i = 0; i < 3; i++) {
-		grid_size *= grid_layout[i];
+		grid_size *= info->grid[i];
 	}
 
 	/* num_waves = ceil((tg_size.x * tg_size.y, tg_size.z) / (16 * num_pipes)) */
-	num_waves = (block_layout[0] * block_layout[1] * block_layout[2] +
+	num_waves = (info->block[0] * info->block[1] * info->block[2] +
 			wave_divisor - 1) / wave_divisor;
 
 	COMPUTE_DBG(rctx->screen, "Using %u pipes, "
@@ -353,20 +604,20 @@ static void evergreen_emit_direct_dispatch(
 			"allocating %u dwords lds.\n", num_pipes, num_waves, lds_size);
 
-	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
+	radeon_set_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
 
-	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
+	radeon_set_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
 	radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
 	radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
 	radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
 
-	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
+	radeon_set_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
 								group_size);
 
-	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
-	radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
-	radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
-	radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+	radeon_compute_set_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
+	radeon_emit(cs, info->block[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+	radeon_emit(cs, info->block[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+	radeon_emit(cs, info->block[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
 
 	if (rctx->b.chip_class < CAYMAN) {
 		assert(lds_size <= 8192);
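
The wave count computed at the end of the hunk above is a ceiling division, num_waves = ceil(block[0] * block[1] * block[2] / (16 * num_pipes)), written in the usual (n + d - 1) / d integer form. A quick standalone check with made-up numbers (num_pipes comes from the winsys per GPU; 8 is only an example value):

#include <stdio.h>

int main(void)
{
	unsigned block[3] = {16, 16, 1};	/* 256 threads per group */
	unsigned num_pipes = 8;			/* example value */
	unsigned wave_divisor = 16 * num_pipes;
	unsigned num_waves = (block[0] * block[1] * block[2] +
			      wave_divisor - 1) / wave_divisor;

	printf("%u waves per thread block\n", num_waves);	/* prints 2 */
	return 0;
}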
@@ -376,48 +627,36 @@
 		assert(lds_size <= 8160);
 	}
 
-	r600_write_compute_context_reg(cs, CM_R_0288E8_SQ_LDS_ALLOC,
+	radeon_compute_set_context_reg(cs, R_0288E8_SQ_LDS_ALLOC,
 					lds_size | (num_waves << 14));
 
 	/* Dispatch packet */
 	radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-	radeon_emit(cs, grid_layout[0]);
-	radeon_emit(cs, grid_layout[1]);
-	radeon_emit(cs, grid_layout[2]);
+	radeon_emit(cs, info->grid[0]);
+	radeon_emit(cs, info->grid[1]);
+	radeon_emit(cs, info->grid[2]);
 	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
 	radeon_emit(cs, 1);
+
+	if (rctx->is_debug)
+		eg_trace_emit(rctx);
 }
 
-static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
-		const uint *grid_layout)
+static void compute_setup_cbs(struct r600_context *rctx)
 {
-	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
-	int i;
-
-	/* make sure that the gfx ring is only one active */
-	if (ctx->b.rings.dma.cs) {
-		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
-	}
-
-	/* Initialize all the compute-related registers.
-	 *
-	 * See evergreen_init_atom_start_compute_cs() in this file for the list
-	 * of registers initialized by the start_compute_cs_cmd atom.
-	 */
-	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
-
-	ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
-	r600_flush_emit(ctx);
+	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+	unsigned i;
 
 	/* Emit colorbuffers. */
 	/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
-	for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
-		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
-		unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
+	for (i = 0; i < 8 && i < rctx->framebuffer.state.nr_cbufs; i++) {
+		struct r600_surface *cb = (struct r600_surface*)rctx->framebuffer.state.cbufs[i];
+		unsigned reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
 						       (struct r600_resource*)cb->base.texture,
-						       RADEON_USAGE_READWRITE);
+						       RADEON_USAGE_READWRITE,
+						       RADEON_PRIO_SHADER_RW_BUFFER);
 
-		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
+		radeon_compute_set_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
 		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
 		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
 		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
@@ -429,59 +668,155 @@
 		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
 		radeon_emit(cs, reloc);
 
-		if (!ctx->keep_tiling_flags) {
-			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
-			radeon_emit(cs, reloc);
-		}
-
 		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
 		radeon_emit(cs, reloc);
 	}
-	if (ctx->keep_tiling_flags) {
-		for (; i < 8 ; i++) {
-			r600_write_compute_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
-						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+	for (; i < 8 ; i++)
+		radeon_compute_set_context_reg(cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
+					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+	for (; i < 12; i++)
+		radeon_compute_set_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
+					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+
+	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
+	radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
+				       rctx->compute_cb_target_mask);
+}
+
+static void compute_emit_cs(struct r600_context *rctx,
+			    const struct pipe_grid_info *info)
+{
+	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+	bool compute_dirty = false;
+	struct r600_pipe_shader *current;
+	struct r600_shader_atomic combined_atomics[8];
+	uint8_t atomic_used_mask;
+
+	/* make sure that the gfx ring is only one active */
+	if (radeon_emitted(rctx->b.dma.cs, 0)) {
+		rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+	}
+
+	r600_update_compressed_resource_state(rctx, true);
+
+	if (!rctx->cmd_buf_is_compute) {
+		rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+		rctx->cmd_buf_is_compute = true;
+	}
+
+	r600_need_cs_space(rctx, 0, true);
+	if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
+		r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
+		current = rctx->cs_shader_state.shader->sel->current;
+		if (compute_dirty) {
+			rctx->cs_shader_state.atom.num_dw = current->command_buffer.num_dw;
+			r600_context_add_resource_size(&rctx->b.b, (struct pipe_resource *)current->bo);
+			r600_set_atom_dirty(rctx, &rctx->cs_shader_state.atom, true);
+		}
+
+		bool need_buf_const = current->shader.uses_tex_buffers ||
+			current->shader.has_txq_cube_array_z_comp;
+
+		for (int i = 0; i < 3; i++) {
+			rctx->cs_block_grid_sizes[i] = info->block[i];
+			rctx->cs_block_grid_sizes[i + 4] = info->grid[i];
 		}
-		for (; i < 12; i++) {
-			r600_write_compute_context_reg(cs, R_028E50_CB_COLOR8_INFO + (i - 8) * 0x1C,
-						       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
+		rctx->cs_block_grid_sizes[3] = rctx->cs_block_grid_sizes[7] = 0;
+		rctx->driver_consts[PIPE_SHADER_COMPUTE].cs_block_grid_size_dirty = true;
+		if (need_buf_const) {
+			eg_setup_buffer_constants(rctx, PIPE_SHADER_COMPUTE);
+		}
+		r600_update_driver_const_buffers(rctx, true);
+
+		if (evergreen_emit_atomic_buffer_setup(rctx, current, combined_atomics, &atomic_used_mask)) {
+			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 		}
 	}
 
-	/* Set CB_TARGET_MASK  XXX: Use cb_misc_state */
-	r600_write_compute_context_reg(cs, R_028238_CB_TARGET_MASK,
-					ctx->compute_cb_target_mask);
+	/* Initialize all the compute-related registers.
+	 *
+	 * See evergreen_init_atom_start_compute_cs() in this file for the list
+	 * of registers initialized by the start_compute_cs_cmd atom.
+	 */
+	r600_emit_command_buffer(cs, &rctx->start_compute_cs_cmd);
+
+	/* emit config state */
+	if (rctx->b.chip_class == EVERGREEN) {
+		if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
+			radeon_set_config_reg_seq(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, 3);
+			radeon_emit(cs, S_008C04_NUM_CLAUSE_TEMP_GPRS(rctx->r6xx_num_clause_temp_gprs));
+			radeon_emit(cs, 0);
+			radeon_emit(cs, 0);
+			radeon_set_config_reg(cs, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, (1 << 8));
+		} else
+			r600_emit_atom(rctx, &rctx->config_state.atom);
+	}
+
+	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+	r600_flush_emit(rctx);
+
+	if (rctx->cs_shader_state.shader->ir_type != PIPE_SHADER_IR_TGSI) {
 
-	/* Emit vertex buffer state */
-	ctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(ctx->cs_vertex_buffer_state.dirty_mask);
-	r600_emit_atom(ctx, &ctx->cs_vertex_buffer_state.atom);
+		compute_setup_cbs(rctx);
+
+		/* Emit vertex buffer state */
+		rctx->cs_vertex_buffer_state.atom.num_dw = 12 * util_bitcount(rctx->cs_vertex_buffer_state.dirty_mask);
+		r600_emit_atom(rctx, &rctx->cs_vertex_buffer_state.atom);
+	} else {
+		uint32_t rat_mask;
+
+		rat_mask = ((1ULL << (((unsigned)rctx->cb_misc_state.nr_image_rats + rctx->cb_misc_state.nr_buffer_rats) * 4)) - 1);
+		radeon_compute_set_context_reg(cs, R_028238_CB_TARGET_MASK,
+					       rat_mask);
+	}
 
 	/* Emit constant buffer state */
-	r600_emit_atom(ctx, &ctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
+	r600_emit_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_COMPUTE].atom);
+
+	/* Emit sampler state */
+	r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].states.atom);
+
+	/* Emit sampler view (texture resource) state */
+	r600_emit_atom(rctx, &rctx->samplers[PIPE_SHADER_COMPUTE].views.atom);
 
-	/* Emit compute shader state */
-	r600_emit_atom(ctx, &ctx->cs_shader_state.atom);
+	/* Emit images state */
+	r600_emit_atom(rctx, &rctx->compute_images.atom);
+
+	/* Emit buffers state */
+	r600_emit_atom(rctx, &rctx->compute_buffers.atom);
+
+	/* Emit shader state */
+	r600_emit_atom(rctx, &rctx->cs_shader_state.atom);
 
 	/* Emit dispatch state and dispatch packet */
-	evergreen_emit_direct_dispatch(ctx, block_layout, grid_layout);
+	evergreen_emit_dispatch(rctx, info);
 
 	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
-	ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
+	rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
 		      R600_CONTEXT_INV_VERTEX_CACHE |
 		      R600_CONTEXT_INV_TEX_CACHE;
-	r600_flush_emit(ctx);
-	ctx->b.flags = 0;
-
-	if (ctx->b.chip_class >= CAYMAN) {
-		ctx->skip_surface_sync_on_next_cs_flush = true;
+	r600_flush_emit(rctx);
+	rctx->b.flags = 0;
+
+	if (rctx->b.chip_class >= CAYMAN) {
+		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+		radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+		/* DEALLOC_STATE prevents the GPU from hanging when a
+		 * SURFACE_SYNC packet is emitted some time after a DISPATCH_DIRECT
+		 * with any of the CB*_DEST_BASE_ENA or DB_DEST_BASE_ENA bits set.
+		 */
+		radeon_emit(cs, PKT3C(PKT3_DEALLOC_STATE, 0, 0));
+		radeon_emit(cs, 0);
 	}
+	if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI)
+		evergreen_emit_atomic_buffer_save(rctx, true, combined_atomics, &atomic_used_mask);
 
 #if 0
-	COMPUTE_DBG(ctx->screen, "cdw: %i\n", cs->cdw);
+	COMPUTE_DBG(rctx->screen, "cdw: %i\n", cs->cdw);
 	for (i = 0; i < cs->cdw; i++) {
-		COMPUTE_DBG(ctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
+		COMPUTE_DBG(rctx->screen, "%4i : 0x%08X\n", i, cs->buf[i]);
 	}
 #endif
 
@@ -491,95 +826,83 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 /**
  * Emit function for r600_cs_shader_state atom
  */
-void evergreen_emit_cs_shader(
-	struct r600_context *rctx,
-	struct r600_atom *atom)
+void evergreen_emit_cs_shader(struct r600_context *rctx,
+			      struct r600_atom *atom)
 {
 	struct r600_cs_shader_state *state =
 		(struct r600_cs_shader_state*)atom;
 	struct r600_pipe_compute *shader = state->shader;
-	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
-	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
 	uint64_t va;
+	struct r600_resource *code_bo;
+	unsigned ngpr, nstack;
+
+	if (shader->ir_type == PIPE_SHADER_IR_TGSI) {
+		code_bo = shader->sel->current->bo;
+		va = shader->sel->current->bo->gpu_address;
+		ngpr = shader->sel->current->shader.bc.ngpr;
+		nstack = shader->sel->current->shader.bc.nstack;
+	} else {
+		code_bo = shader->code_bo;
+		va = shader->code_bo->gpu_address + state->pc;
+		ngpr = shader->bc.ngpr;
+		nstack = shader->bc.nstack;
+	}
 
-	va = r600_resource_va(&rctx->screen->b.b, &kernel->code_bo->b.b);
-
-	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
+	radeon_compute_set_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
 	radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
 	radeon_emit(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
-			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
-			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
+			S_0288D4_NUM_GPRS(ngpr) |
+			S_0288D4_DX10_CLAMP(1) |
+			S_0288D4_STACK_SIZE(nstack));
 	radeon_emit(cs, 0);	/* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
 
 	radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
-	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
-					      kernel->code_bo, RADEON_USAGE_READ));
+	radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx,
+						  code_bo, RADEON_USAGE_READ,
+						  RADEON_PRIO_SHADER_BINARY));
 }
 
-static void evergreen_launch_grid(
-	struct pipe_context *ctx_,
-	const uint *block_layout, const uint *grid_layout,
-	uint32_t pc, const void *input)
+static void evergreen_launch_grid(struct pipe_context *ctx,
+				  const struct pipe_grid_info *info)
 {
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-
-	struct r600_pipe_compute *shader = ctx->cs_shader_state.shader;
-	struct r600_kernel *kernel = &shader->kernels[pc];
-
-	COMPUTE_DBG(ctx->screen, "*** evergreen_launch_grid: pc = %u\n", pc);
-
+	struct r600_context *rctx = (struct r600_context *)ctx;
 #ifdef HAVE_OPENCL
+	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
+	boolean use_kill;
 
-	if (!kernel->code_bo) {
-		void *p;
-		struct r600_bytecode *bc = &kernel->bc;
-		LLVMModuleRef mod = kernel->llvm_module;
-		boolean use_kill = false;
-		bool dump = (ctx->screen->b.debug_flags & DBG_CS) != 0;
-		unsigned use_sb = ctx->screen->b.debug_flags & DBG_SB_CS;
-		unsigned sb_disasm = use_sb ||
-			(ctx->screen->b.debug_flags & DBG_SB_DISASM);
-
-		r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
-			   ctx->screen->has_compressed_msaa_texturing);
-		bc->type = TGSI_PROCESSOR_COMPUTE;
-		bc->isa = ctx->isa;
-		r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);
-
-		if (dump && !sb_disasm) {
-			r600_bytecode_disasm(bc);
-		} else if ((dump && sb_disasm) || use_sb) {
-			if (r600_sb_bytecode_process(ctx, bc, NULL, dump, use_sb))
-				R600_ERR("r600_sb_bytecode_process failed!\n");
-		}
-
-		kernel->code_bo = r600_compute_buffer_alloc_vram(ctx->screen,
-							kernel->bc.ndw * 4);
-		p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
-		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
-		ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
+	if (shader->ir_type != PIPE_SHADER_IR_TGSI) {
+		rctx->cs_shader_state.pc = info->pc;
+		/* Get the config information for this kernel. */
+		r600_shader_binary_read_config(&shader->binary, &shader->bc,
+					       info->pc, &use_kill);
+	} else {
+		use_kill = false;
+		rctx->cs_shader_state.pc = 0;
 	}
 #endif
 
-	shader->active_kernel = kernel;
-	ctx->cs_shader_state.kernel_index = pc;
-	evergreen_compute_upload_input(ctx_, block_layout, grid_layout, input);
-	compute_emit_cs(ctx, block_layout, grid_layout);
+	COMPUTE_DBG(rctx->screen, "*** evergreen_launch_grid: pc = %u\n", info->pc);
+
+
+	evergreen_compute_upload_input(ctx, info);
+	compute_emit_cs(rctx, info);
 }
 
-static void evergreen_set_compute_resources(struct pipe_context * ctx_,
-		unsigned start, unsigned count,
-		struct pipe_surface ** surfaces)
+static void evergreen_set_compute_resources(struct pipe_context *ctx,
+					    unsigned start, unsigned count,
+					    struct pipe_surface **surfaces)
 {
-	struct r600_context *ctx = (struct r600_context *)ctx_;
+	struct r600_context *rctx = (struct r600_context *)ctx;
 	struct r600_surface **resources = (struct r600_surface **)surfaces;
 
-	COMPUTE_DBG(ctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
+	COMPUTE_DBG(rctx->screen, "*** evergreen_set_compute_resources: start = %u count = %u\n",
 			start, count);
 
-	for (int i = 0; i < count; i++) {
-		/* The First two vertex buffers are reserved for parameters and
+	for (unsigned i = 0; i < count; i++) {
+		/* The First four vertex buffers are reserved for parameters and
 		 * global buffers. */
-		unsigned vtx_id = 2 + i;
+		unsigned vtx_id = 4 + i;
 		if (resources[i]) {
 			struct r600_resource_global *buffer =
 				(struct r600_resource_global*)
@@ -587,49 +910,31 @@ static void evergreen_set_compute_resources(struct pipe_context * ctx_,
 			if (resources[i]->base.writable) {
 				assert(i+1 < 12);
 
-				evergreen_set_rat(ctx->cs_shader_state.shader, i+1,
+				evergreen_set_rat(rctx->cs_shader_state.shader, i+1,
 				(struct r600_resource *)resources[i]->base.texture,
 				buffer->chunk->start_in_dw*4,
 				resources[i]->base.texture->width0);
 			}
 
-			evergreen_cs_set_vertex_buffer(ctx, vtx_id,
+			evergreen_cs_set_vertex_buffer(rctx, vtx_id,
 					buffer->chunk->start_in_dw * 4,
 					resources[i]->base.texture);
 		}
 	}
 }
 
-void evergreen_set_cs_sampler_view(struct pipe_context *ctx_,
-		unsigned start_slot, unsigned count,
-		struct pipe_sampler_view **views)
+static void evergreen_set_global_binding(struct pipe_context *ctx,
+					 unsigned first, unsigned n,
+					 struct pipe_resource **resources,
+					 uint32_t **handles)
 {
-	struct r600_pipe_sampler_view **resource =
-		(struct r600_pipe_sampler_view **)views;
-
-	for (int i = 0; i < count; i++)	{
-		if (resource[i]) {
-			assert(i+1 < 12);
-			/* XXX: Implement */
-			assert(!"Compute samplers not implemented.");
-			///FETCH0 = VTX0 (param buffer),
-			//FETCH1 = VTX1 (global buffer pool), FETCH2... = TEX
-		}
-	}
-}
-
-
-static void evergreen_set_global_binding(
-	struct pipe_context *ctx_, unsigned first, unsigned n,
-	struct pipe_resource **resources,
-	uint32_t **handles)
-{
-	struct r600_context *ctx = (struct r600_context *)ctx_;
-	struct compute_memory_pool *pool = ctx->screen->global_pool;
+	struct r600_context *rctx = (struct r600_context *)ctx;
+	struct compute_memory_pool *pool = rctx->screen->global_pool;
 	struct r600_resource_global **buffers =
 		(struct r600_resource_global **)resources;
+	unsigned i;
 
-	COMPUTE_DBG(ctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
+	COMPUTE_DBG(rctx->screen, "*** evergreen_set_global_binding first = %u n = %u\n",
 			first, n);
 
 	if (!resources) {
@@ -637,19 +942,42 @@ static void evergreen_set_global_binding(
 		return;
 	}
 
-	compute_memory_finalize_pending(pool, ctx_);
+	/* We mark these items for promotion to the pool if they
+	 * aren't already there */
+	for (i = first; i < first + n; i++) {
+		struct compute_memory_item *item = buffers[i]->chunk;
+
+		if (!is_item_in_pool(item))
+			buffers[i]->chunk->status |= ITEM_FOR_PROMOTING;
+	}
 
-	for (int i = 0; i < n; i++)
+	if (compute_memory_finalize_pending(pool, ctx) == -1) {
+		/* XXX: Unset */
+		return;
+	}
+
+	for (i = first; i < first + n; i++) {
+		uint32_t buffer_offset;
+		uint32_t handle;
 		assert(resources[i]->target == PIPE_BUFFER);
 		assert(resources[i]->bind & PIPE_BIND_GLOBAL);
 
-		*(handles[i]) = buffers[i]->chunk->start_in_dw * 4;
+		buffer_offset = util_le32_to_cpu(*(handles[i]));
+		handle = buffer_offset + buffers[i]->chunk->start_in_dw * 4;
+
+		*(handles[i]) = util_cpu_to_le32(handle);
 	}
 
-	evergreen_set_rat(ctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
-	evergreen_cs_set_vertex_buffer(ctx, 1, 0,
+	/* globals for writing */
+	evergreen_set_rat(rctx->cs_shader_state.shader, 0, pool->bo, 0, pool->size_in_dw * 4);
+	/* globals for reading */
+	evergreen_cs_set_vertex_buffer(rctx, 1, 0,
 				(struct pipe_resource*)pool->bo);
+
+	/* constants for reading, LLVM puts them in text segment */
+	evergreen_cs_set_vertex_buffer(rctx, 2, 0,
+				(struct pipe_resource*)rctx->cs_shader_state.shader->code_bo);
 }
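
Together with evergreen_compute_upload_input() and evergreen_set_compute_resources() from the earlier hunks, this function fixes the compute fetch-slot layout: RAT0 is the write view of the global pool, while the vertex-fetch slots are reserved as below. The enum is purely illustrative (the driver uses bare numbers); note also that the handles patched above are kept little-endian via util_le32_to_cpu()/util_cpu_to_le32().

/* Hypothetical summary of the fixed compute VTX slots after this patch. */
enum evergreen_cs_vtx_slot {
	CS_VTX_KERNEL_PARAMS  = 0,	/* paired with constant buffer 0 */
	CS_VTX_GLOBAL_POOL    = 1,	/* read view; RAT0 is the write view */
	CS_VTX_CODE_RODATA    = 2,	/* constants LLVM keeps in the text segment */
	CS_VTX_PARAMS_DYNAMIC = 3,	/* kernel params for dynamic indexing */
	CS_VTX_FIRST_RESOURCE = 4	/* pipe_surface resources start here */
};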
 
 /**
@@ -659,32 +987,27 @@ static void evergreen_set_global_binding(
  * command stream by the start_cs_cmd atom.  However, since the SET_CONTEXT_REG
  * packet requires that the shader type bit be set, we must initialize all
  * context registers needed for compute in this function.  The registers
- * intialized by the start_cs_cmd atom can be found in evereen_state.c in the
+ * initialized by the start_cs_cmd atom can be found in evergreen_state.c in the
  * functions evergreen_init_atom_start_cs or cayman_init_atom_start_cs depending
  * on the GPU family.
  */
-void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
+void evergreen_init_atom_start_compute_cs(struct r600_context *rctx)
 {
-	struct r600_command_buffer *cb = &ctx->start_compute_cs_cmd;
+	struct r600_command_buffer *cb = &rctx->start_compute_cs_cmd;
 	int num_threads;
 	int num_stack_entries;
 
-	/* since all required registers are initialised in the
+	/* since all required registers are initialized in the
 	 * start_compute_cs_cmd atom, we can EMIT_EARLY here.
 	 */
 	r600_init_command_buffer(cb, 256);
 	cb->pkt_flags = RADEON_CP_PACKET3_COMPUTE_MODE;
 
-	/* This must be first. */
-	r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
-	r600_store_value(cb, 0x80000000);
-	r600_store_value(cb, 0x80000000);
-
 	/* We're setting config registers here. */
 	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
 	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 
-	switch (ctx->b.family) {
+	switch (rctx->b.family) {
 	case CHIP_CEDAR:
 	default:
 		num_threads = 128;
@@ -729,19 +1052,11 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 		break;
 	}
 
-	/* Config Registers */
-	if (ctx->b.chip_class < CAYMAN)
-		evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
-					   ctx->screen->b.info.drm_minor);
-	else
-		cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
-					ctx->screen->b.info.drm_minor);
-
 	/* The primitive type always needs to be POINTLIST for compute. */
 	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
 						V_008958_DI_PT_POINTLIST);
 
-	if (ctx->b.chip_class < CAYMAN) {
+	if (rctx->b.chip_class < CAYMAN) {
@@ -754,7 +1069,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 		 * R_008E28_SQ_STATIC_THREAD_MGMT3
 		 */
 
-		/* XXX: We may need to adjust the thread and stack resouce
+		/* XXX: We may need to adjust the thread and stack resource
 		 * values for 3D/compute interop */
 
 		r600_store_config_reg_seq(cb, R_008C18_SQ_THREAD_RESOURCE_MGMT_1, 5);
@@ -791,7 +1106,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	 * allocate the appropriate amount of LDS dwords using the
 	 * CM_R_0288E8_SQ_LDS_ALLOC register. */
-	if (ctx->b.chip_class < CAYMAN) {
+	if (rctx->b.chip_class < CAYMAN) {
 		r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
 					S_008E2C_NUM_PS_LDS(0x0000) |
 					S_008E2C_NUM_LS_LDS(8192));
 	} else {
@@ -802,7 +1117,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
 	/* Context Registers */
 
-	if (ctx->b.chip_class < CAYMAN) {
+	if (rctx->b.chip_class < CAYMAN) {
 		/* workaround for hw issues with dyn gpr - must set all limits
 		 * to 240 instead of 0, 0x1e == 240 / 8
 		 */
@@ -822,10 +1137,9 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	r600_store_context_reg(cb, R_028B54_VGT_SHADER_STAGES_EN, 2/*CS_ON*/);
 
 	r600_store_context_reg(cb, R_0286E8_SPI_COMPUTE_INPUT_CNTL,
-						S_0286E8_TID_IN_GROUP_ENA
-						| S_0286E8_TGID_ENA
-						| S_0286E8_DISABLE_INDEX_PACK)
-						;
+						S_0286E8_TID_IN_GROUP_ENA(1) |
+						S_0286E8_TGID_ENA(1) |
+						S_0286E8_DISABLE_INDEX_PACK(1));
 
 	/* The LOOP_CONST registers are an optimizations for loops that allows
 	 * you to store the initial counter, increment value, and maximum
 	 * counter value.
@@ -844,103 +1158,57 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	eg_store_loop_const(cb, R_03A200_SQ_LOOP_CONST_0 + (160 * 4), 0x1000FFF);
 }
 
-void evergreen_init_compute_state_functions(struct r600_context *ctx)
+void evergreen_init_compute_state_functions(struct r600_context *rctx)
 {
-	ctx->b.b.create_compute_state = evergreen_create_compute_state;
-	ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
-	ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
-//	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
-	ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
-	ctx->b.b.set_global_binding = evergreen_set_global_binding;
-	ctx->b.b.launch_grid = evergreen_launch_grid;
-
-	/* We always use at least one vertex buffer for parameters (id = 1)*/
-	ctx->cs_vertex_buffer_state.enabled_mask =
-	ctx->cs_vertex_buffer_state.dirty_mask = 0x2;
-}
-
-struct pipe_resource *r600_compute_global_buffer_create(
-	struct pipe_screen *screen,
-	const struct pipe_resource *templ)
-{
-	struct r600_resource_global* result = NULL;
-	struct r600_screen* rscreen = NULL;
-	int size_in_dw = 0;
-
-	assert(templ->target == PIPE_BUFFER);
-	assert(templ->bind & PIPE_BIND_GLOBAL);
-	assert(templ->array_size == 1 || templ->array_size == 0);
-	assert(templ->depth0 == 1 || templ->depth0 == 0);
-	assert(templ->height0 == 1 || templ->height0 == 0);
-
-	result = (struct r600_resource_global*)
-	CALLOC(sizeof(struct r600_resource_global), 1);
-	rscreen = (struct r600_screen*)screen;
-
-	COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
-	COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
-			templ->array_size);
-
-	result->base.b.vtbl = &r600_global_buffer_vtbl;
-	result->base.b.b.screen = screen;
-	result->base.b.b = *templ;
-	pipe_reference_init(&result->base.b.b.reference, 1);
-
-	size_in_dw = (templ->width0+3) / 4;
+	rctx->b.b.create_compute_state = evergreen_create_compute_state;
+	rctx->b.b.delete_compute_state = evergreen_delete_compute_state;
+	rctx->b.b.bind_compute_state = evergreen_bind_compute_state;
+//	 rctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
+	rctx->b.b.set_compute_resources = evergreen_set_compute_resources;
+	rctx->b.b.set_global_binding = evergreen_set_global_binding;
+	rctx->b.b.launch_grid = evergreen_launch_grid;
 
-	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
-
-	if (result->chunk == NULL)
-	{
-		free(result);
-		return NULL;
-	}
-
-	return &result->base.b.b;
 }
 
-void r600_compute_global_buffer_destroy(
-	struct pipe_screen *screen,
-	struct pipe_resource *res)
+static void *r600_compute_global_transfer_map(struct pipe_context *ctx,
+					      struct pipe_resource *resource,
+					      unsigned level,
+					      unsigned usage,
+					      const struct pipe_box *box,
+					      struct pipe_transfer **ptransfer)
 {
-	struct r600_resource_global* buffer = NULL;
-	struct r600_screen* rscreen = NULL;
+	struct r600_context *rctx = (struct r600_context*)ctx;
+	struct compute_memory_pool *pool = rctx->screen->global_pool;
+	struct r600_resource_global* buffer =
+		(struct r600_resource_global*)resource;
 
-	assert(res->target == PIPE_BUFFER);
-	assert(res->bind & PIPE_BIND_GLOBAL);
+	struct compute_memory_item *item = buffer->chunk;
+	struct pipe_resource *dst = NULL;
+	unsigned offset = box->x;
 
-	buffer = (struct r600_resource_global*)res;
-	rscreen = (struct r600_screen*)screen;
+	if (is_item_in_pool(item)) {
+		compute_memory_demote_item(pool, item, ctx);
+	}
+	else {
+		if (item->real_buffer == NULL) {
+			item->real_buffer =
+				r600_compute_buffer_alloc_vram(pool->screen, item->size_in_dw * 4);
+		}
+	}
 
-	compute_memory_free(rscreen->global_pool, buffer->chunk->id);
+	dst = (struct pipe_resource*)item->real_buffer;
 
-	buffer->chunk = NULL;
-	free(res);
-}
-
-void *r600_compute_global_transfer_map(
-	struct pipe_context *ctx_,
-	struct pipe_resource *resource,
-	unsigned level,
-	unsigned usage,
-	const struct pipe_box *box,
-	struct pipe_transfer **ptransfer)
-{
-	struct r600_context *rctx = (struct r600_context*)ctx_;
-	struct compute_memory_pool *pool = rctx->screen->global_pool;
-	struct r600_resource_global* buffer =
-		(struct r600_resource_global*)resource;
+	if (usage & PIPE_TRANSFER_READ)
+		buffer->chunk->status |= ITEM_MAPPED_FOR_READING;
 
 	COMPUTE_DBG(rctx->screen, "* r600_compute_global_transfer_map()\n"
 			"level = %u, usage = %u, box(x = %u, y = %u, z = %u "
 			"width = %u, height = %u, depth = %u)\n", level, usage,
 			box->x, box->y, box->z, box->width, box->height,
 			box->depth);
-	COMPUTE_DBG(rctx->screen, "Buffer id = %u offset = "
-		"%u (box.x)\n", buffer->chunk->id, box->x);
-
+	COMPUTE_DBG(rctx->screen, "Buffer id = %"PRIi64" offset = "
+		"%u (box.x)\n", item->id, box->x);
 
-	compute_memory_finalize_pending(pool, ctx_);
 
 	assert(resource->target == PIPE_BUFFER);
 	assert(resource->bind & PIPE_BIND_GLOBAL);
@@ -949,14 +1217,12 @@ void *r600_compute_global_transfer_map(
 	assert(box->z == 0);
 
 	///TODO: do it better, mapping is not possible if the pool is too big
-	return pipe_buffer_map_range(ctx_, (struct pipe_resource*)buffer->chunk->pool->bo,
-			box->x + (buffer->chunk->start_in_dw * 4),
-			box->width, usage, ptransfer);
+	return pipe_buffer_map_range(ctx, dst,
+			offset, box->width, usage, ptransfer);
 }
 
-void r600_compute_global_transfer_unmap(
-	struct pipe_context *ctx_,
-	struct pipe_transfer* transfer)
+static void r600_compute_global_transfer_unmap(struct pipe_context *ctx,
+					       struct pipe_transfer *transfer)
 {
 	/* struct r600_resource_global are not real resources, they just map
 	 * to an offset within the compute memory pool.  The function
 	 * r600_compute_global_transfer_map() maps the memory pool
 	 * resource rather than the struct r600_resource_global passed to
 	 * this function.
@@ -971,23 +1237,75 @@
 	assert (!"This function should not be called");
 }
 
-void r600_compute_global_transfer_flush_region(
-	struct pipe_context *ctx_,
-	struct pipe_transfer *transfer,
-	const struct pipe_box *box)
+static void r600_compute_global_transfer_flush_region(struct pipe_context *ctx,
+						      struct pipe_transfer *transfer,
+						      const struct pipe_box *box)
 {
 	assert(0 && "TODO");
 }
 
-void r600_compute_global_transfer_inline_write(
-	struct pipe_context *pipe,
-	struct pipe_resource *resource,
-	unsigned level,
-	unsigned usage,
-	const struct pipe_box *box,
-	const void *data,
-	unsigned stride,
-	unsigned layer_stride)
+static void r600_compute_global_buffer_destroy(struct pipe_screen *screen,
+					       struct pipe_resource *res)
 {
-	assert(0 && "TODO");
+	struct r600_resource_global* buffer = NULL;
+	struct r600_screen* rscreen = NULL;
+
+	assert(res->target == PIPE_BUFFER);
+	assert(res->bind & PIPE_BIND_GLOBAL);
+
+	buffer = (struct r600_resource_global*)res;
+	rscreen = (struct r600_screen*)screen;
+
+	compute_memory_free(rscreen->global_pool, buffer->chunk->id);
+
+	buffer->chunk = NULL;
+	free(res);
+}
+
+static const struct u_resource_vtbl r600_global_buffer_vtbl =
+{
+	u_default_resource_get_handle, /* get_handle */
+	r600_compute_global_buffer_destroy, /* resource_destroy */
+	r600_compute_global_transfer_map, /* transfer_map */
+	r600_compute_global_transfer_flush_region,/* transfer_flush_region */
+	r600_compute_global_transfer_unmap, /* transfer_unmap */
+};
+
+struct pipe_resource *r600_compute_global_buffer_create(struct pipe_screen *screen,
+							const struct pipe_resource *templ)
+{
+	struct r600_resource_global* result = NULL;
+	struct r600_screen* rscreen = NULL;
+	int size_in_dw = 0;
+
+	assert(templ->target == PIPE_BUFFER);
+	assert(templ->bind & PIPE_BIND_GLOBAL);
+	assert(templ->array_size == 1 || templ->array_size == 0);
+	assert(templ->depth0 == 1 || templ->depth0 == 0);
+	assert(templ->height0 == 1 || templ->height0 == 0);
+
+	result = (struct r600_resource_global*)
+	CALLOC(sizeof(struct r600_resource_global), 1);
+	rscreen = (struct r600_screen*)screen;
+
+	COMPUTE_DBG(rscreen, "*** r600_compute_global_buffer_create\n");
+	COMPUTE_DBG(rscreen, "width = %u array_size = %u\n", templ->width0,
+			templ->array_size);
+
+	result->base.b.vtbl = &r600_global_buffer_vtbl;
+	result->base.b.b = *templ;
+	result->base.b.b.screen = screen;
+	pipe_reference_init(&result->base.b.b.reference, 1);
+
+	size_in_dw = (templ->width0+3) / 4;
+
+	result->chunk = compute_memory_alloc(rscreen->global_pool, size_in_dw);
+
+	if (result->chunk == NULL)
+	{
+		free(result);
+		return NULL;
+	}
+
+	return &result->base.b.b;
+}
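
As a closing note on the ELF path: r600_shader_binary_read_config() earlier in this patch walks .AMDGPU.config as a flat array of little-endian (register, value) DWORD pairs, stepping 8 bytes at a time and taking config_size_per_symbol bytes per kernel symbol. Here is a standalone sketch of that decoding; the two register macros mirror the ones defined near the top of the patch, the blob contents are invented, and a little-endian host is assumed for the memcpy shortcut.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define R_028850_SQ_PGM_RESOURCES_PS 0x028850
#define R_028868_SQ_PGM_RESOURCES_VS 0x028868

static uint32_t le32(const unsigned char *p)
{
	/* stands in for util_le32_to_cpu() on a little-endian byte stream */
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	const uint32_t pairs[4] = {R_028850_SQ_PGM_RESOURCES_PS, 0x10,
				   R_028868_SQ_PGM_RESOURCES_VS, 0x08};
	unsigned char config[sizeof(pairs)];
	unsigned i;

	memcpy(config, pairs, sizeof(pairs));	/* pretend this came from ELF */
	for (i = 0; i < sizeof(config); i += 8)
		printf("reg 0x%06x = 0x%08x\n",
		       le32(config + i), le32(config + i + 4));
	return 0;
}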