From d5b23dfc1c07f98afe749053b9cb4b69829fe3d4 Mon Sep 17 00:00:00 2001
From: Marek Olšák
Date: Tue, 13 Aug 2013 21:49:59 +0200
Subject: [PATCH] r600g: move streamout state to drivers/radeon
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

This streamout state code will be used by radeonsi.

There are new structures r600_common_context and r600_common_screen.
What is inherited by what is shown here:

pipe_context -> r600_common_context -> r600_context
pipe_screen -> r600_common_screen -> r600_screen

The common structures reside in drivers/radeon. Currently they only
contain enough functionality to be able to handle streamout. Eventually
I'd like the whole pipe_screen implementation to be shared and some of
the context stuff too.

This is quite big, but most changes are because of the new structures
and the fact that r600_write_value is replaced by radeon_emit.

Thanks to Tom Stellard for fixing the build for r600g/compute.

Reviewed-by: Michel Dänzer
Reviewed-by: Christian König
Tested-by: Tom Stellard
---
 .../drivers/r600/compute_memory_pool.c        |   4 +-
 src/gallium/drivers/r600/evergreen_compute.c  | 130 ++---
 .../drivers/r600/evergreen_hw_context.c       |  71 +--
 src/gallium/drivers/r600/evergreen_state.c    | 454 +++++++++---------
 src/gallium/drivers/r600/evergreend.h         |  16 +-
 src/gallium/drivers/r600/r600_asm.c           |   8 +-
 src/gallium/drivers/r600/r600_blit.c          |  42 +-
 src/gallium/drivers/r600/r600_buffer.c        |  28 +-
 src/gallium/drivers/r600/r600_hw_context.c    | 363 ++++----------
 src/gallium/drivers/r600/r600_isa.c           |   4 +-
 src/gallium/drivers/r600/r600_pipe.c          | 313 ++++++------
 src/gallium/drivers/r600/r600_pipe.h          | 201 +-------
 src/gallium/drivers/r600/r600_query.c         |  58 +--
 src/gallium/drivers/r600/r600_resource.c      |   8 +-
 src/gallium/drivers/r600/r600_resource.h      |  28 +-
 src/gallium/drivers/r600/r600_shader.c        |  16 +-
 src/gallium/drivers/r600/r600_state.c         | 356 +++++++-------
 src/gallium/drivers/r600/r600_state_common.c  | 309 ++++--------
 src/gallium/drivers/r600/r600_texture.c       |  40 +-
 src/gallium/drivers/r600/r600_uvd.c           |  14 +-
 src/gallium/drivers/r600/sb/sb_core.cpp       |   4 +-
 src/gallium/drivers/radeon/Makefile.sources   |   2 +
 src/gallium/drivers/radeon/r600_cs.h          |  97 ++++
 src/gallium/drivers/radeon/r600_pipe_common.c |  85 ++++
 src/gallium/drivers/radeon/r600_pipe_common.h | 179 +++++++
 src/gallium/drivers/radeon/r600_streamout.c   | 338 +++++++++++++
 src/gallium/drivers/radeon/r600d_common.h     | 143 ++++++
 27 files changed, 1825 insertions(+), 1486 deletions(-)
 create mode 100644 src/gallium/drivers/radeon/r600_cs.h
 create mode 100644 src/gallium/drivers/radeon/r600_pipe_common.c
 create mode 100644 src/gallium/drivers/radeon/r600_pipe_common.h
 create mode 100644 src/gallium/drivers/radeon/r600_streamout.c
 create mode 100644 src/gallium/drivers/radeon/r600d_common.h
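A note for readers following the refactor: the "inheritance" above is plain
C struct embedding. The common base is the first member of the derived
struct, named "b" throughout, which is why the diff below is full of
rctx->b.b (the pipe_context), rctx->b.rings.gfx.cs (the gfx command
stream) and rctx->screen->b.info (the winsys info). Here is a minimal
sketch of the pattern and of the radeon_emit() helpers that replace
r600_write_value(); the member lists are simplified for illustration and
are not the verbatim contents of r600_pipe_common.h/r600_cs.h:

/* Sketch of the new layering (simplified, not the real headers). */
struct r600_common_screen {
	struct pipe_screen b;		/* base class; rscreen->b.b in the diff */
	struct radeon_winsys *ws;
	struct radeon_info info;	/* rctx->screen->b.info in the diff */
	enum chip_class chip_class;
};

struct r600_common_context {
	struct pipe_context b;		/* base class; rctx->b.b in the diff */
	struct radeon_winsys *ws;	/* rctx->b.ws */
	enum chip_class chip_class;	/* rctx->b.chip_class */
	enum radeon_family family;	/* rctx->b.family */
	unsigned flags;			/* flush flags; rctx->b.flags */
	struct r600_rings rings;	/* rctx->b.rings.{gfx,dma} */
};

struct r600_context {
	struct r600_common_context b;	/* r600g's derived context */
	/* ...r600-specific state... */
};

/* radeon_emit() appends one dword to the command stream, exactly like
 * the open-coded cs->buf[cs->cdw++] stores that remain in this diff: */
static INLINE void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
{
	cs->buf[cs->cdw++] = value;
}

static INLINE void radeon_emit_array(struct radeon_winsys_cs *cs,
				     const uint32_t *values, unsigned count)
{
	memcpy(cs->buf + cs->cdw, values, count * 4);
	cs->cdw += count;
}

Upcasting is just taking the address of the first member, so &rctx->b is
valid wherever an r600_common_context is expected, which is what the
r600_context_bo_reloc(&rctx->b, ...) calls below rely on.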
diff --git a/src/gallium/drivers/r600/compute_memory_pool.c b/src/gallium/drivers/r600/compute_memory_pool.c
index 454af900bc6..a02df844ff0 100644
--- a/src/gallium/drivers/r600/compute_memory_pool.c
+++ b/src/gallium/drivers/r600/compute_memory_pool.c
@@ -79,7 +79,7 @@ void compute_memory_pool_delete(struct compute_memory_pool* pool)
 	COMPUTE_DBG(pool->screen, "* compute_memory_pool_delete()\n");
 	free(pool->shadow);
 	if (pool->bo) {
-		pool->screen->screen.resource_destroy((struct pipe_screen *)
+		pool->screen->b.b.resource_destroy((struct pipe_screen *)
 			pool->screen, (struct pipe_resource *)pool->bo);
 	}
 	free(pool);
@@ -176,7 +176,7 @@ void compute_memory_grow_pool(struct compute_memory_pool* pool,
 		compute_memory_shadow(pool, pipe, 1);
 		pool->shadow = realloc(pool->shadow, new_size_in_dw*4);
 		pool->size_in_dw = new_size_in_dw;
-		pool->screen->screen.resource_destroy(
+		pool->screen->b.b.resource_destroy(
 			(struct pipe_screen *)pool->screen,
 			(struct pipe_resource *)pool->bo);
 		pool->bo = (struct r600_resource*)r600_compute_buffer_alloc_vram(
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 9b2bae3e841..fbbc4fde8db 100644
--- a/src/gallium/drivers/r600/evergreen_compute.c
+++ b/src/gallium/drivers/r600/evergreen_compute.c
@@ -126,7 +126,7 @@ static void evergreen_set_rat(
 	rat_templ.u.tex.last_layer = 0;
 
 	/* Add the RAT the list of color buffers */
-	pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->context.create_surface(
+	pipe->ctx->framebuffer.state.cbufs[id] = pipe->ctx->b.b.create_surface(
 		(struct pipe_context *)pipe->ctx,
 		(struct pipe_resource *)bo, &rat_templ);
@@ -159,7 +159,7 @@ static void evergreen_cs_set_vertex_buffer(
 	/* The vertex instructions in the compute shaders use the texture cache,
 	 * so we need to invalidate it. */
-	rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
+	rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
 	state->enabled_mask |= 1 << vb_index;
 	state->dirty_mask |= 1 << vb_index;
 	state->atom.dirty = true;
@@ -178,7 +178,7 @@ static void evergreen_cs_set_constant_buffer(
 	cb.buffer = buffer;
 	cb.user_buffer = NULL;
 
-	rctx->context.set_constant_buffer(&rctx->context, PIPE_SHADER_COMPUTE, cb_index, &cb);
+	rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
 }
 
 static const struct u_resource_vtbl r600_global_buffer_vtbl =
@@ -326,10 +326,10 @@ static void evergreen_emit_direct_dispatch(
 		const uint *block_layout, const uint *grid_layout)
 {
 	int i;
-	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
 	struct r600_pipe_compute *shader = rctx->cs_shader_state.shader;
 	unsigned num_waves;
-	unsigned num_pipes = rctx->screen->info.r600_max_pipes;
+	unsigned num_pipes = rctx->screen->b.info.r600_max_pipes;
 	unsigned wave_divisor = (16 * num_pipes);
 	int group_size = 1;
 	int grid_size = 1;
@@ -356,19 +356,19 @@ static void evergreen_emit_direct_dispatch(
 	r600_write_config_reg(cs, R_008970_VGT_NUM_INDICES, group_size);
 
 	r600_write_config_reg_seq(cs, R_00899C_VGT_COMPUTE_START_X, 3);
-	r600_write_value(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
-	r600_write_value(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
-	r600_write_value(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
+	radeon_emit(cs, 0); /* R_00899C_VGT_COMPUTE_START_X */
+	radeon_emit(cs, 0); /* R_0089A0_VGT_COMPUTE_START_Y */
+	radeon_emit(cs, 0); /* R_0089A4_VGT_COMPUTE_START_Z */
 
 	r600_write_config_reg(cs, R_0089AC_VGT_COMPUTE_THREAD_GROUP_SIZE,
 								group_size);
 
 	r600_write_compute_context_reg_seq(cs, R_0286EC_SPI_COMPUTE_NUM_THREAD_X, 3);
-	r600_write_value(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
-	r600_write_value(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
-	r600_write_value(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
+	radeon_emit(cs, block_layout[0]); /* R_0286EC_SPI_COMPUTE_NUM_THREAD_X */
+	radeon_emit(cs, block_layout[1]); /* R_0286F0_SPI_COMPUTE_NUM_THREAD_Y */
+	radeon_emit(cs, block_layout[2]); /* R_0286F4_SPI_COMPUTE_NUM_THREAD_Z */
 
-	if (rctx->chip_class < CAYMAN) {
+	if (rctx->b.chip_class < CAYMAN) {
 		assert(lds_size <= 8192);
 	} else {
 		/* Cayman appears to have a slightly smaller limit, see the
@@ -380,24 +380,24 @@ static void evergreen_emit_direct_dispatch(
 			lds_size | (num_waves << 14));
 
 	/* Dispatch packet */
-	r600_write_value(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
-	r600_write_value(cs, grid_layout[0]);
-	r600_write_value(cs, grid_layout[1]);
-	r600_write_value(cs, grid_layout[2]);
+	radeon_emit(cs, PKT3C(PKT3_DISPATCH_DIRECT, 3, 0));
+	radeon_emit(cs, grid_layout[0]);
+	radeon_emit(cs, grid_layout[1]);
+	radeon_emit(cs, grid_layout[2]);
 	/* VGT_DISPATCH_INITIATOR = COMPUTE_SHADER_EN */
-	r600_write_value(cs, 1);
+	radeon_emit(cs, 1);
 }
 
 static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 		const uint *grid_layout)
 {
-	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
+	struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
 	unsigned flush_flags = 0;
 	int i;
 
 	/* make sure that the gfx ring is only one active */
-	if (ctx->rings.dma.cs) {
-		ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
+	if (ctx->b.rings.dma.cs) {
+		ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
 	}
 
 	/* Initialize all the compute-related registers.
@@ -407,36 +407,36 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 	 */
 	r600_emit_command_buffer(cs, &ctx->start_compute_cs_cmd);
 
-	ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
+	ctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV;
 	r600_flush_emit(ctx);
 
 	/* Emit colorbuffers. */
 	/* XXX support more than 8 colorbuffers (the offsets are not a multiple of 0x3C for CB8-11) */
 	for (i = 0; i < 8 && i < ctx->framebuffer.state.nr_cbufs; i++) {
 		struct r600_surface *cb = (struct r600_surface*)ctx->framebuffer.state.cbufs[i];
-		unsigned reloc = r600_context_bo_reloc(ctx, &ctx->rings.gfx,
+		unsigned reloc = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx,
 						       (struct r600_resource*)cb->base.texture,
 						       RADEON_USAGE_READWRITE);
 
 		r600_write_compute_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 7);
-		r600_write_value(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
-		r600_write_value(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
-		r600_write_value(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
-		r600_write_value(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
-		r600_write_value(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
-		r600_write_value(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
-		r600_write_value(cs, cb->cb_color_dim);		/* R_028C78_CB_COLOR0_DIM */
+		radeon_emit(cs, cb->cb_color_base);	/* R_028C60_CB_COLOR0_BASE */
+		radeon_emit(cs, cb->cb_color_pitch);	/* R_028C64_CB_COLOR0_PITCH */
+		radeon_emit(cs, cb->cb_color_slice);	/* R_028C68_CB_COLOR0_SLICE */
+		radeon_emit(cs, cb->cb_color_view);	/* R_028C6C_CB_COLOR0_VIEW */
+		radeon_emit(cs, cb->cb_color_info);	/* R_028C70_CB_COLOR0_INFO */
+		radeon_emit(cs, cb->cb_color_attrib);	/* R_028C74_CB_COLOR0_ATTRIB */
+		radeon_emit(cs, cb->cb_color_dim);	/* R_028C78_CB_COLOR0_DIM */
 
-		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
-		r600_write_value(cs, reloc);
+		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */
+		radeon_emit(cs, reloc);
 
 		if (!ctx->keep_tiling_flags) {
-			r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
-			r600_write_value(cs, reloc);
+			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */
+			radeon_emit(cs, reloc);
 		}
 
-		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
-		r600_write_value(cs, reloc);
+		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */
+		radeon_emit(cs, reloc);
 	}
 	if (ctx->keep_tiling_flags) {
 		for (; i < 8 ; i++) {
@@ -469,7 +469,7 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 
 	/* XXX evergreen_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff
 	 */
-	ctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+	ctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
 		      R600_CONTEXT_INV_VERTEX_CACHE |
 		      R600_CONTEXT_INV_TEX_CACHE;
 	r600_flush_emit(ctx);
@@ -486,9 +486,9 @@ static void compute_emit_cs(struct r600_context *ctx, const uint *block_layout,
 		flush_flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
 	}
 
-	ctx->ws->cs_flush(ctx->rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
+	ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flush_flags, ctx->screen->cs_count++);
 
-	ctx->flags = 0;
+	ctx->b.flags = 0;
 
 	COMPUTE_DBG(ctx->screen, "shader started\n");
 }
@@ -505,20 +505,20 @@ void evergreen_emit_cs_shader(
 		(struct r600_cs_shader_state*)atom;
 	struct r600_pipe_compute *shader = state->shader;
 	struct r600_kernel *kernel = &shader->kernels[state->kernel_index];
-	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
 	uint64_t va;
 
-	va = r600_resource_va(&rctx->screen->screen, &kernel->code_bo->b.b);
+	va = r600_resource_va(&rctx->screen->b.b, &kernel->code_bo->b.b);
 
 	r600_write_compute_context_reg_seq(cs, R_0288D0_SQ_PGM_START_LS, 3);
-	r600_write_value(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
-	r600_write_value(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
+	radeon_emit(cs, va >> 8); /* R_0288D0_SQ_PGM_START_LS */
+	radeon_emit(cs,           /* R_0288D4_SQ_PGM_RESOURCES_LS */
			S_0288D4_NUM_GPRS(kernel->bc.ngpr)
			| S_0288D4_STACK_SIZE(kernel->bc.nstack));
-	r600_write_value(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
+	radeon_emit(cs, 0); /* R_0288D8_SQ_PGM_RESOURCES_LS_2 */
 
-	r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
-	r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
+	radeon_emit(cs, PKT3C(PKT3_NOP, 0, 0));
+	radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
 							kernel->code_bo, RADEON_USAGE_READ));
 }
@@ -546,11 +546,11 @@ static void evergreen_launch_grid(
 		unsigned sb_disasm = use_sb ||
 			(ctx->screen->debug_flags & DBG_SB_DISASM);
 
-		r600_bytecode_init(bc, ctx->chip_class, ctx->family,
+		r600_bytecode_init(bc, ctx->b.chip_class, ctx->b.family,
 			   ctx->screen->has_compressed_msaa_texturing);
 		bc->type = TGSI_PROCESSOR_COMPUTE;
 		bc->isa = ctx->isa;
-		r600_llvm_compile(mod, ctx->family, bc, &use_kill, dump);
+		r600_llvm_compile(mod, ctx->b.family, bc, &use_kill, dump);
 
 		if (dump && !sb_disasm) {
 			r600_bytecode_disasm(bc);
@@ -563,7 +563,7 @@ static void evergreen_launch_grid(
 				kernel->bc.ndw * 4);
 		p = r600_buffer_mmap_sync_with_rings(ctx, kernel->code_bo, PIPE_TRANSFER_WRITE);
 		memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
-		ctx->ws->buffer_unmap(kernel->code_bo->cs_buf);
+		ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
 	}
 #endif
 	shader->active_kernel = kernel;
@@ -706,7 +706,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0));
 	r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
 
-	switch (ctx->family) {
+	switch (ctx->b.family) {
 	case CHIP_CEDAR:
 	default:
 		num_threads = 128;
@@ -752,18 +752,18 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	}
 
 	/* Config Registers */
-	if (ctx->chip_class < CAYMAN)
-		evergreen_init_common_regs(cb, ctx->chip_class, ctx->family,
-					   ctx->screen->info.drm_minor);
+	if (ctx->b.chip_class < CAYMAN)
+		evergreen_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+					   ctx->screen->b.info.drm_minor);
 	else
-		cayman_init_common_regs(cb, ctx->chip_class, ctx->family,
-					ctx->screen->info.drm_minor);
+		cayman_init_common_regs(cb, ctx->b.chip_class, ctx->b.family,
+					ctx->screen->b.info.drm_minor);
 
 	/* The primitive type always needs to be POINTLIST for compute. */
 	r600_store_config_reg(cb, R_008958_VGT_PRIMITIVE_TYPE,
						V_008958_DI_PT_POINTLIST);
 
-	if (ctx->chip_class < CAYMAN) {
+	if (ctx->b.chip_class < CAYMAN) {
 
 		/* These registers control which simds can be used by each stage.
 		 * The default for these registers is 0xffffffff, which means
@@ -813,7 +813,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 	 * allocate the appropriate amount of LDS dwords using the
 	 * CM_R_0288E8_SQ_LDS_ALLOC register.
 	 */
-	if (ctx->chip_class < CAYMAN) {
+	if (ctx->b.chip_class < CAYMAN) {
 		r600_store_config_reg(cb, R_008E2C_SQ_LDS_RESOURCE_MGMT,
			S_008E2C_NUM_PS_LDS(0x0000) | S_008E2C_NUM_LS_LDS(8192));
 	} else {
@@ -824,7 +824,7 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
 	/* Context Registers */
 
-	if (ctx->chip_class < CAYMAN) {
+	if (ctx->b.chip_class < CAYMAN) {
 		/* workaround for hw issues with dyn gpr - must set all limits
 		 * to 240 instead of 0, 0x1e == 240 / 8
 		 */
@@ -868,15 +868,15 @@ void evergreen_init_atom_start_compute_cs(struct r600_context *ctx)
 
 void evergreen_init_compute_state_functions(struct r600_context *ctx)
 {
-	ctx->context.create_compute_state = evergreen_create_compute_state;
-	ctx->context.delete_compute_state = evergreen_delete_compute_state;
-	ctx->context.bind_compute_state = evergreen_bind_compute_state;
+	ctx->b.b.create_compute_state = evergreen_create_compute_state;
+	ctx->b.b.delete_compute_state = evergreen_delete_compute_state;
+	ctx->b.b.bind_compute_state = evergreen_bind_compute_state;
 //	 ctx->context.create_sampler_view = evergreen_compute_create_sampler_view;
-	ctx->context.set_compute_resources = evergreen_set_compute_resources;
-	ctx->context.set_compute_sampler_views = evergreen_set_cs_sampler_view;
-	ctx->context.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
-	ctx->context.set_global_binding = evergreen_set_global_binding;
-	ctx->context.launch_grid = evergreen_launch_grid;
+	ctx->b.b.set_compute_resources = evergreen_set_compute_resources;
+	ctx->b.b.set_compute_sampler_views = evergreen_set_cs_sampler_view;
+	ctx->b.b.bind_compute_sampler_states = evergreen_bind_compute_sampler_states;
+	ctx->b.b.set_global_binding = evergreen_set_global_binding;
+	ctx->b.b.launch_grid = evergreen_launch_grid;
 
 	/* We always use at least one vertex buffer for parameters (id = 1)*/
 	ctx->cs_vertex_buffer_state.enabled_mask =
diff --git a/src/gallium/drivers/r600/evergreen_hw_context.c b/src/gallium/drivers/r600/evergreen_hw_context.c
index 93c9c58fd01..2cefecaf123 100644
--- a/src/gallium/drivers/r600/evergreen_hw_context.c
+++ b/src/gallium/drivers/r600/evergreen_hw_context.c
@@ -28,37 +28,6 @@
 #include "util/u_memory.h"
 #include "util/u_math.h"
 
-void evergreen_flush_vgt_streamout(struct r600_context *ctx)
-{
-	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
-
-	r600_write_config_reg(cs, R_0084FC_CP_STRMOUT_CNTL, 0);
-
-	cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
-	cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);
-
-	cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
-	cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
-	cs->buf[cs->cdw++] = R_0084FC_CP_STRMOUT_CNTL >> 2; /* register */
-	cs->buf[cs->cdw++] = 0;
-	cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* reference value */
-	cs->buf[cs->cdw++] = S_0084FC_OFFSET_UPDATE_DONE(1); /* mask */
-	cs->buf[cs->cdw++] = 4; /* poll interval */
-}
-
-void evergreen_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
-{
-	struct radeon_winsys_cs *cs = ctx->rings.gfx.cs;
-
-	if (buffer_enable_bit) {
-		r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
-		r600_write_value(cs, S_028B94_STREAMOUT_0_EN(1)); /* R_028B94_VGT_STRMOUT_CONFIG */
-		r600_write_value(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit)); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */
-	} else {
-		r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG, S_028B94_STREAMOUT_0_EN(0));
-	}
-}
-
 void evergreen_dma_copy(struct r600_context *rctx,
		struct pipe_resource *dst,
		struct pipe_resource *src,
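The two streamout functions removed above are not deleted outright; they
move into the new shared file src/gallium/drivers/radeon/r600_streamout.c
(see the diffstat), rewritten against r600_common_context so that radeonsi
can reuse them. A sketch of what the shared form of the enable helper
looks like, assuming the obvious rename rather than quoting the new file
verbatim:

/* Sketch of the moved code in r600_streamout.c (assumed shape, based on
 * the removed evergreen_set_streamout_enable() above; the real file also
 * covers the r600/r700 variants and the buffer-binding logic). */
static void r600_set_streamout_enable(struct r600_common_context *rctx,
				      unsigned buffer_enable_bit)
{
	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;

	if (buffer_enable_bit) {
		r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2);
		radeon_emit(cs, S_028B94_STREAMOUT_0_EN(1));
		radeon_emit(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit));
	} else {
		r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG,
				       S_028B94_STREAMOUT_0_EN(0));
	}
}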
@@ -66,15 +35,15 @@ void evergreen_dma_copy(struct r600_context *rctx,
		uint64_t src_offset,
		uint64_t size)
 {
-	struct radeon_winsys_cs *cs = rctx->rings.dma.cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
 	unsigned i, ncopy, csize, sub_cmd, shift;
 	struct r600_resource *rdst = (struct r600_resource*)dst;
 	struct r600_resource *rsrc = (struct r600_resource*)src;
 
 	/* make sure that the dma ring is only one active */
-	rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
-	dst_offset += r600_resource_va(&rctx->screen->screen, dst);
-	src_offset += r600_resource_va(&rctx->screen->screen, src);
+	rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
+	dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
+	src_offset += r600_resource_va(&rctx->screen->b.b, src);
 
 	/* see if we use dword or byte copy */
 	if (!(dst_offset & 0x3) && !(src_offset & 0x3) && !(size & 0x3)) {
@@ -91,8 +60,8 @@ void evergreen_dma_copy(struct r600_context *rctx,
 	for (i = 0; i < ncopy; i++) {
 		csize = size < 0x000fffff ? size : 0x000fffff;
 		/* emit reloc before writting cs so that cs is always in consistent state */
-		r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ);
-		r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE);
+		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ);
+		r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE);
 		cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, csize);
 		cs->buf[cs->cdw++] = dst_offset & 0xffffffff;
 		cs->buf[cs->cdw++] = src_offset & 0xffffffff;
@@ -114,26 +83,26 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
				   struct pipe_resource *dst, uint64_t offset,
				   unsigned size, uint32_t clear_value)
 {
-	struct radeon_winsys_cs *cs = rctx->rings.gfx.cs;
+	struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
 
 	assert(size);
 	assert(rctx->screen->has_cp_dma);
 
-	offset += r600_resource_va(&rctx->screen->screen, dst);
+	offset += r600_resource_va(&rctx->screen->b.b, dst);
 
 	/* Flush the cache where the resource is bound. */
 	r600_flag_resource_cache_flush(rctx, dst);
-	rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE;
+	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
 
 	while (size) {
 		unsigned sync = 0;
 		unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
 		unsigned reloc;
 
-		r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
+		r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
 
 		/* Flush the caches for the first copy only. */
-		if (rctx->flags) {
+		if (rctx->b.flags) {
 			r600_flush_emit(rctx);
 		}
@@ -143,18 +112,18 @@ void evergreen_cp_dma_clear_buffer(struct r600_context *rctx,
 		}
 
 		/* This must be done after r600_need_cs_space. */
-		reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx,
+		reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx,
					      (struct r600_resource*)dst, RADEON_USAGE_WRITE);
 
-		r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0));
-		r600_write_value(cs, clear_value);	/* DATA [31:0] */
-		r600_write_value(cs, sync | PKT3_CP_DMA_SRC_SEL(2));	/* CP_SYNC [31] | SRC_SEL[30:29] */
-		r600_write_value(cs, offset);	/* DST_ADDR_LO [31:0] */
-		r600_write_value(cs, (offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
-		r600_write_value(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
+		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
+		radeon_emit(cs, clear_value);	/* DATA [31:0] */
+		radeon_emit(cs, sync | PKT3_CP_DMA_SRC_SEL(2));	/* CP_SYNC [31] | SRC_SEL[30:29] */
+		radeon_emit(cs, offset);	/* DST_ADDR_LO [31:0] */
+		radeon_emit(cs, (offset >> 32) & 0xff);	/* DST_ADDR_HI [7:0] */
+		radeon_emit(cs, byte_count);	/* COMMAND [29:22] | BYTE_COUNT [20:0] */
 
-		r600_write_value(cs, PKT3(PKT3_NOP, 0, 0));
-		r600_write_value(cs, reloc);
+		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+		radeon_emit(cs, reloc);
 
 		size -= byte_count;
 		offset += byte_count;
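Most of the mechanical churn in the remaining files is radeon_emit() of
PKT3 packets and register writes. For reference when reading the hunks
below, the helpers expand roughly as follows (a sketch following the
r600d.h conventions; the exact field masks here are assumptions):

/* A type-3 packet header encodes the packet type, opcode and dword
 * count; PKT3C additionally sets the compute-mode bit. */
#define PKT_TYPE_S(x)		(((x) & 0x3) << 30)
#define PKT_COUNT_S(x)		(((x) & 0x3FFF) << 16)
#define PKT3_IT_OPCODE_S(x)	(((x) & 0xFF) << 8)
#define PKT3_PREDICATE(x)	(((x) >> 0) & 0x1)
#define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | \
				    PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))

/* A context-register write is a SET_CONTEXT_REG packet followed by the
 * register offset (relative to the 0x28000 context-register base) and
 * the values; this is what the r600_write_context_reg_seq() +
 * radeon_emit() pairs in this patch expand to. */
#define R600_CONTEXT_REG_OFFSET 0x28000
static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs,
					      unsigned reg, unsigned num)
{
	radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, num, 0));
	radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2);
}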
r600_write_context_reg_seq(cs, R_0285BC_PA_CL_UCP0_X, 6*4); - r600_write_array(cs, 6*4, (unsigned*)state); + radeon_emit_array(cs, (unsigned*)state, 6*4); } static void evergreen_set_polygon_stipple(struct pipe_context *ctx, @@ -1300,7 +1300,7 @@ static void evergreen_get_scissor_rect(struct r600_context *rctx, tl_y = 1; /* cayman hw workaround */ - if (rctx->chip_class == CAYMAN) { + if (rctx->b.chip_class == CAYMAN) { if (br_x == 1 && br_y == 1) br_x = 2; } @@ -1322,15 +1322,15 @@ static void evergreen_set_scissor_states(struct pipe_context *ctx, static void evergreen_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_scissor_state *state = &rctx->scissor.scissor; uint32_t tl, br; evergreen_get_scissor_rect(rctx, state->minx, state->miny, state->maxx, state->maxy, &tl, &br); r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2); - r600_write_value(cs, tl); - r600_write_value(cs, br); + radeon_emit(cs, tl); + radeon_emit(cs, br); } /** @@ -1359,7 +1359,7 @@ void evergreen_init_color_surface_rat(struct r600_context *rctx, } surf->cb_color_base = - r600_resource_va(rctx->context.screen, pipe_buffer) >> 8; + r600_resource_va(rctx->b.b.screen, pipe_buffer) >> 8; surf->cb_color_pitch = (pitch / 8) - 1; @@ -1452,7 +1452,7 @@ void evergreen_init_color_surface(struct r600_context *rctx, fmask_bankh = eg_bank_wh(fmask_bankh); /* 128 bit formats require tile type = 1 */ - if (rscreen->chip_class == CAYMAN) { + if (rscreen->b.chip_class == CAYMAN) { if (util_format_get_blocksize(surf->base.format) >= 16) non_disp_tiling = 1; } @@ -1472,7 +1472,7 @@ void evergreen_init_color_surface(struct r600_context *rctx, S_028C74_NON_DISP_TILING_ORDER(non_disp_tiling) | S_028C74_FMASK_BANK_HEIGHT(fmask_bankh); - if (rctx->chip_class == CAYMAN) { + if (rctx->b.chip_class == CAYMAN) { color_attrib |= S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == UTIL_FORMAT_SWIZZLE_1); @@ -1559,7 +1559,7 @@ void evergreen_init_color_surface(struct r600_context *rctx, color_info |= S_028C70_COMPRESSION(1) | S_028C70_FAST_CLEAR(1); } - base_offset = r600_resource_va(rctx->context.screen, pipe_tex); + base_offset = r600_resource_va(rctx->b.b.screen, pipe_tex); /* XXX handle enabling of CB beyond BASE8 which has different offset */ surf->cb_color_base = (base_offset + offset) >> 8; @@ -1591,7 +1591,7 @@ static void evergreen_init_depth_surface(struct r600_context *rctx, struct r600_surface *surf) { struct r600_screen *rscreen = rctx->screen; - struct pipe_screen *screen = &rscreen->screen; + struct pipe_screen *screen = &rscreen->b.b; struct r600_texture *rtex = (struct r600_texture*)surf->base.texture; uint64_t offset; unsigned level, pitch, slice, format, array_mode; @@ -1637,7 +1637,7 @@ static void evergreen_init_depth_surface(struct r600_context *rctx, S_028040_BANK_WIDTH(bankw) | S_028040_BANK_HEIGHT(bankh) | S_028040_MACRO_TILE_ASPECT(macro_aspect); - if (rscreen->chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) { + if (rscreen->b.chip_class == CAYMAN && rtex->resource.b.b.nr_samples > 1) { surf->db_depth_info |= S_028040_NUM_SAMPLES(util_logbase2(rtex->resource.b.b.nr_samples)); } surf->db_depth_base = offset; @@ -1683,7 +1683,7 @@ static void evergreen_init_depth_surface(struct r600_context *rctx, surf->db_stencil_base = offset; /* DRM 2.6.18 allows the INVALID format to disable stencil. * Older kernels are out of luck. 
*/ - surf->db_stencil_info = rctx->screen->info.drm_minor >= 18 ? + surf->db_stencil_info = rctx->screen->b.info.drm_minor >= 18 ? S_028044_FORMAT(V_028044_STENCIL_INVALID) : S_028044_FORMAT(V_028044_STENCIL_8); } @@ -1691,7 +1691,7 @@ static void evergreen_init_depth_surface(struct r600_context *rctx, surf->htile_enabled = 0; /* use htile only for first level */ if (rtex->htile && !level) { - uint64_t va = r600_resource_va(&rctx->screen->screen, &rtex->htile->b.b); + uint64_t va = r600_resource_va(&rctx->screen->b.b, &rtex->htile->b.b); surf->htile_enabled = 1; surf->db_htile_data_base = va >> 8; surf->db_htile_surface = S_028ABC_HTILE_WIDTH(1) | @@ -1714,20 +1714,20 @@ static void evergreen_set_framebuffer_state(struct pipe_context *ctx, uint32_t i, log_samples; if (rctx->framebuffer.state.nr_cbufs) { - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB; if (rctx->framebuffer.state.cbufs[0]->texture->nr_samples > 1) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; } } if (rctx->framebuffer.state.zsbuf) { - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB; rtex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture; if (rtex->htile) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META; } } @@ -1816,7 +1816,7 @@ static void evergreen_set_framebuffer_state(struct pipe_context *ctx, } log_samples = util_logbase2(rctx->framebuffer.nr_samples); - if (rctx->chip_class == CAYMAN && rctx->db_misc_state.log_samples != log_samples) { + if (rctx->b.chip_class == CAYMAN && rctx->db_misc_state.log_samples != log_samples) { rctx->db_misc_state.log_samples = log_samples; rctx->db_misc_state.atom.dirty = true; } @@ -1827,7 +1827,7 @@ static void evergreen_set_framebuffer_state(struct pipe_context *ctx, rctx->framebuffer.atom.num_dw = 4; /* SCISSOR */ /* MSAA. 
*/ - if (rctx->chip_class == EVERGREEN) { + if (rctx->b.chip_class == EVERGREEN) { switch (rctx->framebuffer.nr_samples) { case 2: case 4: @@ -1865,7 +1865,7 @@ static void evergreen_set_framebuffer_state(struct pipe_context *ctx, rctx->framebuffer.atom.num_dw += 24; if (rctx->keep_tiling_flags) rctx->framebuffer.atom.num_dw += 2; - } else if (rctx->screen->info.drm_minor >= 18) { + } else if (rctx->screen->b.info.drm_minor >= 18) { rctx->framebuffer.atom.num_dw += 4; } @@ -1951,7 +1951,7 @@ static void evergreen_get_sample_position(struct pipe_context *ctx, static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; unsigned max_dist = 0; switch (nr_samples) { @@ -1960,31 +1960,31 @@ static void evergreen_emit_msaa_state(struct r600_context *rctx, int nr_samples) break; case 2: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_2x)); - r600_write_array(cs, Elements(sample_locs_2x), sample_locs_2x); + radeon_emit_array(cs, sample_locs_2x, Elements(sample_locs_2x)); max_dist = max_dist_2x; break; case 4: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_4x)); - r600_write_array(cs, Elements(sample_locs_4x), sample_locs_4x); + radeon_emit_array(cs, sample_locs_4x, Elements(sample_locs_4x)); max_dist = max_dist_4x; break; case 8: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_0, Elements(sample_locs_8x)); - r600_write_array(cs, Elements(sample_locs_8x), sample_locs_8x); + radeon_emit_array(cs, sample_locs_8x, Elements(sample_locs_8x)); max_dist = max_dist_8x; break; } if (nr_samples > 1) { r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1) | + radeon_emit(cs, S_028C00_LAST_PIXEL(1) | S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */ - r600_write_value(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | + radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */ } else { r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ - r600_write_value(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ + radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ + radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ } } @@ -2071,7 +2071,7 @@ static void cayman_emit_msaa_state(struct r600_context *rctx, int nr_samples) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; unsigned max_dist = 0; switch (nr_samples) { @@ -2094,40 +2094,40 @@ static void cayman_emit_msaa_state(struct r600_context *rctx, int nr_samples) break; case 8: r600_write_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 14); - r600_write_value(cs, cm_sample_locs_8x[0]); - r600_write_value(cs, cm_sample_locs_8x[4]); - r600_write_value(cs, 0); - r600_write_value(cs, 0); - r600_write_value(cs, cm_sample_locs_8x[1]); - r600_write_value(cs, cm_sample_locs_8x[5]); - r600_write_value(cs, 0); - r600_write_value(cs, 0); - r600_write_value(cs, cm_sample_locs_8x[2]); - r600_write_value(cs, cm_sample_locs_8x[6]); - r600_write_value(cs, 0); - r600_write_value(cs, 0); - r600_write_value(cs, cm_sample_locs_8x[3]); - r600_write_value(cs, cm_sample_locs_8x[7]); + radeon_emit(cs, cm_sample_locs_8x[0]); + radeon_emit(cs, 
cm_sample_locs_8x[4]); + radeon_emit(cs, 0); + radeon_emit(cs, 0); + radeon_emit(cs, cm_sample_locs_8x[1]); + radeon_emit(cs, cm_sample_locs_8x[5]); + radeon_emit(cs, 0); + radeon_emit(cs, 0); + radeon_emit(cs, cm_sample_locs_8x[2]); + radeon_emit(cs, cm_sample_locs_8x[6]); + radeon_emit(cs, 0); + radeon_emit(cs, 0); + radeon_emit(cs, cm_sample_locs_8x[3]); + radeon_emit(cs, cm_sample_locs_8x[7]); max_dist = cm_max_dist_8x; break; case 16: r600_write_context_reg_seq(cs, CM_R_028BF8_PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0, 16); - r600_write_value(cs, cm_sample_locs_16x[0]); - r600_write_value(cs, cm_sample_locs_16x[4]); - r600_write_value(cs, cm_sample_locs_16x[8]); - r600_write_value(cs, cm_sample_locs_16x[12]); - r600_write_value(cs, cm_sample_locs_16x[1]); - r600_write_value(cs, cm_sample_locs_16x[5]); - r600_write_value(cs, cm_sample_locs_16x[9]); - r600_write_value(cs, cm_sample_locs_16x[13]); - r600_write_value(cs, cm_sample_locs_16x[2]); - r600_write_value(cs, cm_sample_locs_16x[6]); - r600_write_value(cs, cm_sample_locs_16x[10]); - r600_write_value(cs, cm_sample_locs_16x[14]); - r600_write_value(cs, cm_sample_locs_16x[3]); - r600_write_value(cs, cm_sample_locs_16x[7]); - r600_write_value(cs, cm_sample_locs_16x[11]); - r600_write_value(cs, cm_sample_locs_16x[15]); + radeon_emit(cs, cm_sample_locs_16x[0]); + radeon_emit(cs, cm_sample_locs_16x[4]); + radeon_emit(cs, cm_sample_locs_16x[8]); + radeon_emit(cs, cm_sample_locs_16x[12]); + radeon_emit(cs, cm_sample_locs_16x[1]); + radeon_emit(cs, cm_sample_locs_16x[5]); + radeon_emit(cs, cm_sample_locs_16x[9]); + radeon_emit(cs, cm_sample_locs_16x[13]); + radeon_emit(cs, cm_sample_locs_16x[2]); + radeon_emit(cs, cm_sample_locs_16x[6]); + radeon_emit(cs, cm_sample_locs_16x[10]); + radeon_emit(cs, cm_sample_locs_16x[14]); + radeon_emit(cs, cm_sample_locs_16x[3]); + radeon_emit(cs, cm_sample_locs_16x[7]); + radeon_emit(cs, cm_sample_locs_16x[11]); + radeon_emit(cs, cm_sample_locs_16x[15]); max_dist = cm_max_dist_16x; break; } @@ -2136,9 +2136,9 @@ static void cayman_emit_msaa_state(struct r600_context *rctx, int nr_samples) unsigned log_samples = util_logbase2(nr_samples); r600_write_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1) | + radeon_emit(cs, S_028C00_LAST_PIXEL(1) | S_028C00_EXPAND_LINE_WIDTH(1)); /* CM_R_028BDC_PA_SC_LINE_CNTL */ - r600_write_value(cs, S_028BE0_MSAA_NUM_SAMPLES(log_samples) | + radeon_emit(cs, S_028BE0_MSAA_NUM_SAMPLES(log_samples) | S_028BE0_MAX_SAMPLE_DIST(max_dist) | S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples)); /* CM_R_028BE0_PA_SC_AA_CONFIG */ @@ -2151,8 +2151,8 @@ static void cayman_emit_msaa_state(struct r600_context *rctx, int nr_samples) S_028804_STATIC_ANCHOR_ASSOCIATIONS(1)); } else { r600_write_context_reg_seq(cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1)); /* CM_R_028BDC_PA_SC_LINE_CNTL */ - r600_write_value(cs, 0); /* CM_R_028BE0_PA_SC_AA_CONFIG */ + radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* CM_R_028BDC_PA_SC_LINE_CNTL */ + radeon_emit(cs, 0); /* CM_R_028BE0_PA_SC_AA_CONFIG */ r600_write_context_reg(cs, CM_R_028804_DB_EQAA, S_028804_HIGH_QUALITY_INTERSECTIONS(1) | @@ -2162,7 +2162,7 @@ static void cayman_emit_msaa_state(struct r600_context *rctx, int nr_samples) static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_framebuffer_state *state = 
&rctx->framebuffer.state; unsigned nr_cbufs = state->nr_cbufs; unsigned i, tl, br; @@ -2176,42 +2176,42 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r for (i = 0; i < nr_cbufs; i++) { struct r600_surface *cb = (struct r600_surface*)state->cbufs[i]; struct r600_texture *tex = (struct r600_texture *)cb->base.texture; - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, (struct r600_resource*)cb->base.texture, RADEON_USAGE_READWRITE); r600_write_context_reg_seq(cs, R_028C60_CB_COLOR0_BASE + i * 0x3C, 13); - r600_write_value(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */ - r600_write_value(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */ - r600_write_value(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */ - r600_write_value(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */ - r600_write_value(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */ - r600_write_value(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */ - r600_write_value(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */ - r600_write_value(cs, cb->cb_color_cmask); /* R_028C7C_CB_COLOR0_CMASK */ - r600_write_value(cs, cb->cb_color_cmask_slice); /* R_028C80_CB_COLOR0_CMASK_SLICE */ - r600_write_value(cs, cb->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */ - r600_write_value(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */ - r600_write_value(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */ - r600_write_value(cs, tex->color_clear_value[1]); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */ - - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */ - r600_write_value(cs, reloc); + radeon_emit(cs, cb->cb_color_base); /* R_028C60_CB_COLOR0_BASE */ + radeon_emit(cs, cb->cb_color_pitch); /* R_028C64_CB_COLOR0_PITCH */ + radeon_emit(cs, cb->cb_color_slice); /* R_028C68_CB_COLOR0_SLICE */ + radeon_emit(cs, cb->cb_color_view); /* R_028C6C_CB_COLOR0_VIEW */ + radeon_emit(cs, cb->cb_color_info); /* R_028C70_CB_COLOR0_INFO */ + radeon_emit(cs, cb->cb_color_attrib); /* R_028C74_CB_COLOR0_ATTRIB */ + radeon_emit(cs, cb->cb_color_dim); /* R_028C78_CB_COLOR0_DIM */ + radeon_emit(cs, cb->cb_color_cmask); /* R_028C7C_CB_COLOR0_CMASK */ + radeon_emit(cs, cb->cb_color_cmask_slice); /* R_028C80_CB_COLOR0_CMASK_SLICE */ + radeon_emit(cs, cb->cb_color_fmask); /* R_028C84_CB_COLOR0_FMASK */ + radeon_emit(cs, cb->cb_color_fmask_slice); /* R_028C88_CB_COLOR0_FMASK_SLICE */ + radeon_emit(cs, tex->color_clear_value[0]); /* R_028C8C_CB_COLOR0_CLEAR_WORD0 */ + radeon_emit(cs, tex->color_clear_value[1]); /* R_028C90_CB_COLOR0_CLEAR_WORD1 */ + + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C60_CB_COLOR0_BASE */ + radeon_emit(cs, reloc); if (!rctx->keep_tiling_flags) { - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */ + radeon_emit(cs, reloc); } - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C74_CB_COLOR0_ATTRIB */ + radeon_emit(cs, reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C7C_CB_COLOR0_CMASK */ + radeon_emit(cs, reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */ - 
r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C84_CB_COLOR0_FMASK */ + radeon_emit(cs, reloc); } /* set CB_COLOR1_INFO for possible dual-src blending */ if (i == 1 && !((struct r600_texture*)state->cbufs[0]->texture)->is_rat) { @@ -2219,13 +2219,13 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r ((struct r600_surface*)state->cbufs[0])->cb_color_info); if (!rctx->keep_tiling_flags) { - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, (struct r600_resource*)state->cbufs[0]->texture, RADEON_USAGE_READWRITE); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028C70_CB_COLOR0_INFO */ + radeon_emit(cs, reloc); } i++; } @@ -2241,8 +2241,8 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r /* ZS buffer. */ if (state->zsbuf) { struct r600_surface *zb = (struct r600_surface*)state->zsbuf; - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, (struct r600_resource*)state->zsbuf->texture, RADEON_USAGE_READWRITE); @@ -2251,47 +2251,47 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r r600_write_context_reg(cs, R_028008_DB_DEPTH_VIEW, zb->db_depth_view); r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 8); - r600_write_value(cs, zb->db_depth_info); /* R_028040_DB_Z_INFO */ - r600_write_value(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */ - r600_write_value(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */ - r600_write_value(cs, zb->db_stencil_base); /* R_02804C_DB_STENCIL_READ_BASE */ - r600_write_value(cs, zb->db_depth_base); /* R_028050_DB_Z_WRITE_BASE */ - r600_write_value(cs, zb->db_stencil_base); /* R_028054_DB_STENCIL_WRITE_BASE */ - r600_write_value(cs, zb->db_depth_size); /* R_028058_DB_DEPTH_SIZE */ - r600_write_value(cs, zb->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */ + radeon_emit(cs, zb->db_depth_info); /* R_028040_DB_Z_INFO */ + radeon_emit(cs, zb->db_stencil_info); /* R_028044_DB_STENCIL_INFO */ + radeon_emit(cs, zb->db_depth_base); /* R_028048_DB_Z_READ_BASE */ + radeon_emit(cs, zb->db_stencil_base); /* R_02804C_DB_STENCIL_READ_BASE */ + radeon_emit(cs, zb->db_depth_base); /* R_028050_DB_Z_WRITE_BASE */ + radeon_emit(cs, zb->db_stencil_base); /* R_028054_DB_STENCIL_WRITE_BASE */ + radeon_emit(cs, zb->db_depth_size); /* R_028058_DB_DEPTH_SIZE */ + radeon_emit(cs, zb->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */ if (!rctx->keep_tiling_flags) { - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028040_DB_Z_INFO */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028040_DB_Z_INFO */ + radeon_emit(cs, reloc); } - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028048_DB_Z_READ_BASE */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028048_DB_Z_READ_BASE */ + radeon_emit(cs, reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_02804C_DB_STENCIL_READ_BASE */ + radeon_emit(cs, reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */ - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028050_DB_Z_WRITE_BASE */ + radeon_emit(cs, reloc); - 
r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */ - r600_write_value(cs, reloc); - } else if (rctx->screen->info.drm_minor >= 18) { + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); /* R_028054_DB_STENCIL_WRITE_BASE */ + radeon_emit(cs, reloc); + } else if (rctx->screen->b.info.drm_minor >= 18) { /* DRM 2.6.18 allows the INVALID format to disable depth/stencil. * Older kernels are out of luck. */ r600_write_context_reg_seq(cs, R_028040_DB_Z_INFO, 2); - r600_write_value(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */ - r600_write_value(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */ + radeon_emit(cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */ + radeon_emit(cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */ } /* Framebuffer dimensions. */ evergreen_get_scissor_rect(rctx, 0, 0, state->width, state->height, &tl, &br); r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); - r600_write_value(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ - r600_write_value(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ + radeon_emit(cs, tl); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ + radeon_emit(cs, br); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ - if (rctx->chip_class == EVERGREEN) { + if (rctx->b.chip_class == EVERGREEN) { evergreen_emit_msaa_state(rctx, rctx->framebuffer.nr_samples); } else { cayman_emit_msaa_state(rctx, rctx->framebuffer.nr_samples); @@ -2300,7 +2300,7 @@ static void evergreen_emit_framebuffer_state(struct r600_context *rctx, struct r static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a; float offset_units = state->offset_units; float offset_scale = state->offset_scale; @@ -2319,30 +2319,30 @@ static void evergreen_emit_polygon_offset(struct r600_context *rctx, struct r600 } r600_write_context_reg_seq(cs, R_028B80_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); - r600_write_value(cs, fui(offset_scale)); - r600_write_value(cs, fui(offset_units)); - r600_write_value(cs, fui(offset_scale)); - r600_write_value(cs, fui(offset_units)); + radeon_emit(cs, fui(offset_scale)); + radeon_emit(cs, fui(offset_units)); + radeon_emit(cs, fui(offset_scale)); + radeon_emit(cs, fui(offset_units)); } static void evergreen_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom; unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1; unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1; r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); - r600_write_value(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ + radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ /* Always enable the first colorbuffer in CB_SHADER_MASK. This * will assure that the alpha-test will work even if there is * no colorbuffer bound. */ - r600_write_value(cs, 0xf | (a->dual_src_blend ? ps_colormask : 0) | fb_colormask); /* R_02823C_CB_SHADER_MASK */ + radeon_emit(cs, 0xf | (a->dual_src_blend ? 
ps_colormask : 0) | fb_colormask); /* R_02823C_CB_SHADER_MASK */ } static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_db_state *a = (struct r600_db_state*)atom; if (a->rsurf && a->rsurf->htile_enabled) { @@ -2353,7 +2353,7 @@ static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom r600_write_context_reg(cs, R_028ABC_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); r600_write_context_reg(cs, R_028AC8_DB_PRELOAD_CONTROL, a->rsurf->db_preload_control); r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); - reloc_idx = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rtex->htile, RADEON_USAGE_READWRITE); + reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile, RADEON_USAGE_READWRITE); cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); cs->buf[cs->cdw++] = reloc_idx; } else { @@ -2364,7 +2364,7 @@ static void evergreen_emit_db_state(struct r600_context *rctx, struct r600_atom static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom; unsigned db_render_control = 0; unsigned db_count_control = 0; @@ -2374,7 +2374,7 @@ static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_ if (a->occlusion_query_enabled) { db_count_control |= S_028004_PERFECT_ZPASS_COUNTS(1); - if (rctx->chip_class == CAYMAN) { + if (rctx->b.chip_class == CAYMAN) { db_count_control |= S_028004_SAMPLE_RATE(a->log_samples); } db_render_override |= S_02800C_NOOP_CULL_DISABLE(1); @@ -2417,8 +2417,8 @@ static void evergreen_emit_db_misc_state(struct r600_context *rctx, struct r600_ } r600_write_context_reg_seq(cs, R_028000_DB_RENDER_CONTROL, 2); - r600_write_value(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */ - r600_write_value(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */ + radeon_emit(cs, db_render_control); /* R_028000_DB_RENDER_CONTROL */ + radeon_emit(cs, db_count_control); /* R_028004_DB_COUNT_CONTROL */ r600_write_context_reg(cs, R_02800C_DB_RENDER_OVERRIDE, db_render_override); r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); } @@ -2428,7 +2428,7 @@ static void evergreen_emit_vertex_buffers(struct r600_context *rctx, unsigned resource_offset, unsigned pkt_flags) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = state->dirty_mask; while (dirty_mask) { @@ -2441,30 +2441,30 @@ static void evergreen_emit_vertex_buffers(struct r600_context *rctx, rbuffer = (struct r600_resource*)vb->buffer; assert(rbuffer); - va = r600_resource_va(&rctx->screen->screen, &rbuffer->b.b); + va = r600_resource_va(&rctx->screen->b.b, &rbuffer->b.b); va += vb->buffer_offset; /* fetch resources start at index 992 */ - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags); - r600_write_value(cs, (resource_offset + buffer_index) * 8); - r600_write_value(cs, va); /* RESOURCEi_WORD0 */ - r600_write_value(cs, rbuffer->buf->size - vb->buffer_offset - 1); /* RESOURCEi_WORD1 */ - r600_write_value(cs, /* RESOURCEi_WORD2 */ + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags); + radeon_emit(cs, (resource_offset + buffer_index) * 8); + radeon_emit(cs, va); /* RESOURCEi_WORD0 
*/ + radeon_emit(cs, rbuffer->buf->size - vb->buffer_offset - 1); /* RESOURCEi_WORD1 */ + radeon_emit(cs, /* RESOURCEi_WORD2 */ S_030008_ENDIAN_SWAP(r600_endian_swap(32)) | S_030008_STRIDE(vb->stride) | S_030008_BASE_ADDRESS_HI(va >> 32UL)); - r600_write_value(cs, /* RESOURCEi_WORD3 */ + radeon_emit(cs, /* RESOURCEi_WORD3 */ S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) | S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) | S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) | S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W)); - r600_write_value(cs, 0); /* RESOURCEi_WORD4 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD5 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD6 */ - r600_write_value(cs, 0xc0000000); /* RESOURCEi_WORD7 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD6 */ + radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); } state->dirty_mask = 0; } @@ -2487,7 +2487,7 @@ static void evergreen_emit_constant_buffers(struct r600_context *rctx, unsigned reg_alu_const_cache, unsigned pkt_flags) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = state->dirty_mask; while (dirty_mask) { @@ -2500,7 +2500,7 @@ static void evergreen_emit_constant_buffers(struct r600_context *rctx, rbuffer = (struct r600_resource*)cb->buffer; assert(rbuffer); - va = r600_resource_va(&rctx->screen->screen, &rbuffer->b.b); + va = r600_resource_va(&rctx->screen->b.b, &rbuffer->b.b); va += cb->buffer_offset; r600_write_context_reg_flag(cs, reg_alu_constbuf_size + buffer_index * 4, @@ -2508,29 +2508,29 @@ static void evergreen_emit_constant_buffers(struct r600_context *rctx, r600_write_context_reg_flag(cs, reg_alu_const_cache + buffer_index * 4, va >> 8, pkt_flags); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags); - r600_write_value(cs, (buffer_id_base + buffer_index) * 8); - r600_write_value(cs, va); /* RESOURCEi_WORD0 */ - r600_write_value(cs, rbuffer->buf->size - cb->buffer_offset - 1); /* RESOURCEi_WORD1 */ - r600_write_value(cs, /* RESOURCEi_WORD2 */ + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0) | pkt_flags); + radeon_emit(cs, (buffer_id_base + buffer_index) * 8); + radeon_emit(cs, va); /* RESOURCEi_WORD0 */ + radeon_emit(cs, rbuffer->buf->size - cb->buffer_offset - 1); /* RESOURCEi_WORD1 */ + radeon_emit(cs, /* RESOURCEi_WORD2 */ S_030008_ENDIAN_SWAP(r600_endian_swap(32)) | S_030008_STRIDE(16) | S_030008_BASE_ADDRESS_HI(va >> 32UL)); - r600_write_value(cs, /* RESOURCEi_WORD3 */ + radeon_emit(cs, /* RESOURCEi_WORD3 */ S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) | S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) | S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) | S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W)); - r600_write_value(cs, 0); /* RESOURCEi_WORD4 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD5 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD6 */ - r600_write_value(cs, 0xc0000000); /* 
RESOURCEi_WORD7 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD6 */ + radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD7 */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0) | pkt_flags); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); dirty_mask &= ~(1 << buffer_index); } @@ -2573,7 +2573,7 @@ static void evergreen_emit_sampler_views(struct r600_context *rctx, struct r600_samplerview_state *state, unsigned resource_id_base) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = state->dirty_mask; while (dirty_mask) { @@ -2584,18 +2584,18 @@ static void evergreen_emit_sampler_views(struct r600_context *rctx, rview = state->views[resource_index]; assert(rview); - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 8, 0)); - r600_write_value(cs, (resource_id_base + resource_index) * 8); - r600_write_array(cs, 8, rview->tex_resource_words); + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 8, 0)); + radeon_emit(cs, (resource_id_base + resource_index) * 8); + radeon_emit_array(cs, rview->tex_resource_words, 8); - reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rview->tex_resource, + reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource, RADEON_USAGE_READ); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); if (!rview->skip_mip_address_reloc) { - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } } state->dirty_mask = 0; @@ -2621,7 +2621,7 @@ static void evergreen_emit_sampler_states(struct r600_context *rctx, unsigned resource_id_base, unsigned border_index_reg) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = texinfo->states.dirty_mask; while (dirty_mask) { @@ -2631,14 +2631,14 @@ static void evergreen_emit_sampler_states(struct r600_context *rctx, rstate = texinfo->states.states[i]; assert(rstate); - r600_write_value(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); - r600_write_value(cs, (resource_id_base + i) * 3); - r600_write_array(cs, 3, rstate->tex_sampler_words); + radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); + radeon_emit(cs, (resource_id_base + i) * 3); + radeon_emit_array(cs, rstate->tex_sampler_words, 3); if (rstate->border_color_use) { r600_write_config_reg_seq(cs, border_index_reg, 5); - r600_write_value(cs, i); - r600_write_array(cs, 4, rstate->border_color.ui); + radeon_emit(cs, i); + radeon_emit_array(cs, rstate->border_color.ui, 4); } } texinfo->states.dirty_mask = 0; @@ -2664,31 +2664,31 @@ static void evergreen_emit_sample_mask(struct r600_context *rctx, struct r600_at struct r600_sample_mask *s = (struct r600_sample_mask*)a; uint8_t mask = s->sample_mask; - r600_write_context_reg(rctx->rings.gfx.cs, R_028C3C_PA_SC_AA_MASK, + r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C3C_PA_SC_AA_MASK, mask | (mask << 8) | (mask << 16) | (mask << 24)); } static void cayman_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a) { struct r600_sample_mask *s = (struct r600_sample_mask*)a; - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct 
radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint16_t mask = s->sample_mask; r600_write_context_reg_seq(cs, CM_R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2); - r600_write_value(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */ - r600_write_value(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */ + radeon_emit(cs, mask | (mask << 16)); /* X0Y0_X1Y0 */ + radeon_emit(cs, mask | (mask << 16)); /* X0Y1_X1Y1 */ } static void evergreen_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_cso_state *state = (struct r600_cso_state*)a; struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; r600_write_context_reg(cs, R_0288A4_SQ_PGM_START_FS, - (r600_resource_va(rctx->context.screen, &shader->buffer->b.b) + shader->offset) >> 8); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, shader->buffer, RADEON_USAGE_READ)); + (r600_resource_va(rctx->b.b.screen, &shader->buffer->b.b) + shader->offset) >> 8); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, RADEON_USAGE_READ)); } void cayman_init_common_regs(struct r600_command_buffer *cb, @@ -2729,8 +2729,8 @@ static void cayman_init_atom_start_cs(struct r600_context *rctx) r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - cayman_init_common_regs(cb, rctx->chip_class, - rctx->family, rctx->screen->info.drm_minor); + cayman_init_common_regs(cb, rctx->b.chip_class, + rctx->b.family, rctx->screen->b.info.drm_minor); r600_store_config_reg(cb, R_009100_SPI_CONFIG_CNTL, 0); r600_store_config_reg(cb, R_00913C_SPI_CONFIG_CNTL_1, S_00913C_VTX_DONE_DELAY(4)); @@ -3008,7 +3008,7 @@ void evergreen_init_atom_start_cs(struct r600_context *rctx) enum radeon_family family; unsigned tmp; - if (rctx->chip_class == CAYMAN) { + if (rctx->b.chip_class == CAYMAN) { cayman_init_atom_start_cs(rctx); return; } @@ -3024,10 +3024,10 @@ void evergreen_init_atom_start_cs(struct r600_context *rctx) r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - evergreen_init_common_regs(cb, rctx->chip_class, - rctx->family, rctx->screen->info.drm_minor); + evergreen_init_common_regs(cb, rctx->b.chip_class, + rctx->b.family, rctx->screen->b.info.drm_minor); - family = rctx->family; + family = rctx->b.family; switch (family) { case CHIP_CEDAR: default: @@ -3538,7 +3538,7 @@ void *evergreen_create_resolve_blend(struct r600_context *rctx) memset(&blend, 0, sizeof(blend)); blend.independent_blend_enable = true; blend.rt[0].colormask = 0xf; - return evergreen_create_blend_state_mode(&rctx->context, &blend, V_028808_CB_RESOLVE); + return evergreen_create_blend_state_mode(&rctx->b.b, &blend, V_028808_CB_RESOLVE); } void *evergreen_create_decompress_blend(struct r600_context *rctx) @@ -3550,14 +3550,14 @@ void *evergreen_create_decompress_blend(struct r600_context *rctx) memset(&blend, 0, sizeof(blend)); blend.independent_blend_enable = true; blend.rt[0].colormask = 0xf; - return evergreen_create_blend_state_mode(&rctx->context, &blend, mode); + return evergreen_create_blend_state_mode(&rctx->b.b, &blend, mode); } void *evergreen_create_db_flush_dsa(struct r600_context *rctx) { struct pipe_depth_stencil_alpha_state dsa = {{0}}; - return 
rctx->context.create_depth_stencil_alpha_state(&rctx->context, &dsa); + return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); } void evergreen_update_db_shader_control(struct r600_context * rctx) @@ -3610,7 +3610,7 @@ static void evergreen_dma_copy_tile(struct r600_context *rctx, unsigned pitch, unsigned bpp) { - struct radeon_winsys_cs *cs = rctx->rings.dma.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs; struct r600_texture *rsrc = (struct r600_texture*)src; struct r600_texture *rdst = (struct r600_texture*)dst; unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; @@ -3619,7 +3619,7 @@ static void evergreen_dma_copy_tile(struct r600_context *rctx, uint64_t base, addr; /* make sure that only one dma ring is active */ - rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); + rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); dst_mode = rdst->surface.level[dst_level].mode; src_mode = rsrc->surface.level[src_level].mode; @@ -3661,8 +3661,8 @@ static void evergreen_dma_copy_tile(struct r600_context *rctx, bank_w = eg_bank_wh(rsrc->surface.bankw); mt_aspect = eg_macro_tile_aspect(rsrc->surface.mtilea); tile_split = eg_tile_split(rsrc->surface.tile_split); - base += r600_resource_va(&rctx->screen->screen, src); - addr += r600_resource_va(&rctx->screen->screen, dst); + base += r600_resource_va(&rctx->screen->b.b, src); + addr += r600_resource_va(&rctx->screen->b.b, dst); } else { /* L2T */ array_mode = evergreen_array_mode(dst_mode); @@ -3686,8 +3686,8 @@ static void evergreen_dma_copy_tile(struct r600_context *rctx, bank_w = eg_bank_wh(rdst->surface.bankw); mt_aspect = eg_macro_tile_aspect(rdst->surface.mtilea); tile_split = eg_tile_split(rdst->surface.tile_split); - base += r600_resource_va(&rctx->screen->screen, dst); - addr += r600_resource_va(&rctx->screen->screen, src); + base += r600_resource_va(&rctx->screen->b.b, dst); + addr += r600_resource_va(&rctx->screen->b.b, src); } size = (copy_height * pitch) >> 2; @@ -3701,8 +3701,8 @@ static void evergreen_dma_copy_tile(struct r600_context *rctx, } size = (cheight * pitch) >> 2; /* emit reloc before writing cs so that cs is always in consistent state */ - r600_context_bo_reloc(rctx, &rctx->rings.dma, &rsrc->resource, RADEON_USAGE_READ); - r600_context_bo_reloc(rctx, &rctx->rings.dma, &rdst->resource, RADEON_USAGE_WRITE); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE); cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, sub_cmd, size); cs->buf[cs->cdw++] = base >> 8; cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) | @@ -3735,7 +3735,7 @@ boolean evergreen_dma_blit(struct pipe_context *ctx, unsigned src_w, dst_w; unsigned src_x, src_y; - if (rctx->rings.dma.cs == NULL) { + if (rctx->b.rings.dma.cs == NULL) { return FALSE; } if (src->format != dst->format) { @@ -3776,7 +3776,7 @@ boolean evergreen_dma_blit(struct pipe_context *ctx, * DMA only supports it on the tiled side. As such * the tile order is backwards after an L2T/T2L packet. 
*/ - if ((rctx->chip_class == CAYMAN) && + if ((rctx->b.chip_class == CAYMAN) && (src_mode != dst_mode) && (util_format_get_blocksize(src->format) >= 16)) { return FALSE; @@ -3840,7 +3840,7 @@ void evergreen_init_state_functions(struct r600_context *rctx) r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 7); - if (rctx->chip_class == EVERGREEN) { + if (rctx->b.chip_class == EVERGREEN) { r600_init_atom(rctx, &rctx->sample_mask.atom, id++, evergreen_emit_sample_mask, 3); } else { r600_init_atom(rctx, &rctx->sample_mask.atom, id++, cayman_emit_sample_mask, 4); @@ -3862,22 +3862,22 @@ void evergreen_init_state_functions(struct r600_context *rctx) r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); r600_init_atom(rctx, &rctx->viewport.atom, id++, r600_emit_viewport_state, 8); r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, evergreen_emit_vertex_fetch_shader, 5); - r600_init_atom(rctx, &rctx->streamout.begin_atom, id++, r600_emit_streamout_begin, 0); + rctx->atoms[id++] = &rctx->b.streamout.begin_atom; r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23); r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0); - rctx->context.create_blend_state = evergreen_create_blend_state; - rctx->context.create_depth_stencil_alpha_state = evergreen_create_dsa_state; - rctx->context.create_rasterizer_state = evergreen_create_rs_state; - rctx->context.create_sampler_state = evergreen_create_sampler_state; - rctx->context.create_sampler_view = evergreen_create_sampler_view; - rctx->context.set_framebuffer_state = evergreen_set_framebuffer_state; - rctx->context.set_polygon_stipple = evergreen_set_polygon_stipple; - rctx->context.set_scissor_states = evergreen_set_scissor_states; - - if (rctx->chip_class == EVERGREEN) - rctx->context.get_sample_position = evergreen_get_sample_position; + rctx->b.b.create_blend_state = evergreen_create_blend_state; + rctx->b.b.create_depth_stencil_alpha_state = evergreen_create_dsa_state; + rctx->b.b.create_rasterizer_state = evergreen_create_rs_state; + rctx->b.b.create_sampler_state = evergreen_create_sampler_state; + rctx->b.b.create_sampler_view = evergreen_create_sampler_view; + rctx->b.b.set_framebuffer_state = evergreen_set_framebuffer_state; + rctx->b.b.set_polygon_stipple = evergreen_set_polygon_stipple; + rctx->b.b.set_scissor_states = evergreen_set_scissor_states; + + if (rctx->b.chip_class == EVERGREEN) + rctx->b.b.get_sample_position = evergreen_get_sample_position; else - rctx->context.get_sample_position = cayman_get_sample_position; + rctx->b.b.get_sample_position = cayman_get_sample_position; evergreen_init_compute_state_functions(rctx); } diff --git a/src/gallium/drivers/r600/evergreend.h b/src/gallium/drivers/r600/evergreend.h index 8990d6c3fad..2f2e1455d59 100644 --- a/src/gallium/drivers/r600/evergreend.h +++ b/src/gallium/drivers/r600/evergreend.h @@ -161,7 +161,7 @@ #define PKT3_CP_DMA_CMD_DAIC (1 << 29) /* Registers */ -#define R_0084FC_CP_STRMOUT_CNTL 0x000084FC +#define R_0084FC_CP_STRMOUT_CNTL 0x0084FC #define S_0084FC_OFFSET_UPDATE_DONE(x) (((x) & 0x1) << 0) #define R_008960_VGT_STRMOUT_BUFFER_FILLED_SIZE_0 0x008960 /* read-only */ #define R_008964_VGT_STRMOUT_BUFFER_FILLED_SIZE_1 0x008964 /* read-only */ @@ -1997,17 +1997,17 @@ #define S_028B8C_OFFSET(x) (((x) & 0xFFFFFFFF) << 0) #define G_028B8C_OFFSET(x) (((x) >> 0) & 0xFFFFFFFF) #define C_028B8C_OFFSET 0x00000000 -#define R_028B94_VGT_STRMOUT_CONFIG 0x00028B94 +#define R_028B94_VGT_STRMOUT_CONFIG 
0x028B94 #define S_028B94_STREAMOUT_0_EN(x) (((x) & 0x1) << 0) #define S_028B94_STREAMOUT_1_EN(x) (((x) & 0x1) << 1) #define S_028B94_STREAMOUT_2_EN(x) (((x) & 0x1) << 2) #define S_028B94_STREAMOUT_3_EN(x) (((x) & 0x1) << 3) -#define S_028B94_RAST_STREAM(x) (((x) & 0x7) << 4) -#define R_028B98_VGT_STRMOUT_BUFFER_CONFIG 0x00028B98 -#define S_028B98_STREAM_0_BUFFER_EN(x) (((x) & 0xf) << 0) -#define S_028B98_STREAM_1_BUFFER_EN(x) (((x) & 0xf) << 4) -#define S_028B98_STREAM_2_BUFFER_EN(x) (((x) & 0xf) << 8) -#define S_028B98_STREAM_3_BUFFER_EN(x) (((x) & 0xf) << 12) +#define S_028B94_RAST_STREAM(x) (((x) & 0x07) << 4) +#define R_028B98_VGT_STRMOUT_BUFFER_CONFIG 0x028B98 +#define S_028B98_STREAM_0_BUFFER_EN(x) (((x) & 0x0F) << 0) +#define S_028B98_STREAM_1_BUFFER_EN(x) (((x) & 0x0F) << 4) +#define S_028B98_STREAM_2_BUFFER_EN(x) (((x) & 0x0F) << 8) +#define S_028B98_STREAM_3_BUFFER_EN(x) (((x) & 0x0F) << 12) #define R_028C00_PA_SC_LINE_CNTL 0x00028C00 #define S_028C00_EXPAND_LINE_WIDTH(x) (((x) & 0x1) << 9) #define G_028C00_EXPAND_LINE_WIDTH(x) (((x) >> 9) & 0x1) diff --git a/src/gallium/drivers/r600/r600_asm.c b/src/gallium/drivers/r600/r600_asm.c index a0492a66e19..235188bd35f 100644 --- a/src/gallium/drivers/r600/r600_asm.c +++ b/src/gallium/drivers/r600/r600_asm.c @@ -2276,7 +2276,7 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx, struct r600_bytecode bc; struct r600_bytecode_vtx vtx; const struct util_format_description *desc; - unsigned fetch_resource_start = rctx->chip_class >= EVERGREEN ? 0 : 160; + unsigned fetch_resource_start = rctx->b.chip_class >= EVERGREEN ? 0 : 160; unsigned format, num_format, format_comp, endian; uint32_t *bytecode; int i, j, r, fs_size; @@ -2287,14 +2287,14 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx, assert(count < 32); memset(&bc, 0, sizeof(bc)); - r600_bytecode_init(&bc, rctx->chip_class, rctx->family, + r600_bytecode_init(&bc, rctx->b.chip_class, rctx->b.family, rctx->screen->has_compressed_msaa_texturing); bc.isa = rctx->isa; for (i = 0; i < count; i++) { if (elements[i].instance_divisor > 1) { - if (rctx->chip_class == CAYMAN) { + if (rctx->b.chip_class == CAYMAN) { for (j = 0; j < 4; j++) { struct r600_bytecode_alu alu; memset(&alu, 0, sizeof(alu)); @@ -2425,7 +2425,7 @@ void *r600_create_vertex_fetch_shader(struct pipe_context *ctx, } else { memcpy(bytecode, bc.bytecode, fs_size); } - rctx->ws->buffer_unmap(shader->buffer->cs_buf); + rctx->b.ws->buffer_unmap(shader->buffer->cs_buf); r600_bytecode_clear(&bc); return shader; diff --git a/src/gallium/drivers/r600/r600_blit.c b/src/gallium/drivers/r600/r600_blit.c index 1c22a75582b..60dda28e184 100644 --- a/src/gallium/drivers/r600/r600_blit.c +++ b/src/gallium/drivers/r600/r600_blit.c @@ -58,8 +58,8 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer_state.vb); util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_fetch_shader.cso); util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader); - util_blitter_save_so_targets(rctx->blitter, rctx->streamout.num_targets, - (struct pipe_stream_output_target**)rctx->streamout.targets); + util_blitter_save_so_targets(rctx->blitter, rctx->b.streamout.num_targets, + (struct pipe_stream_output_target**)rctx->b.streamout.targets); util_blitter_save_rasterizer(rctx->blitter, rctx->rasterizer_state.cso); if (op & R600_SAVE_FRAGMENT_STATE) { @@ -127,13 +127,13 @@ void r600_blit_decompress_depth(struct pipe_context 
*ctx, /* XXX Decompressing MSAA depth textures is broken on R6xx. * There is also a hardlock if CMASK and FMASK are not present. * Just skip this until we find out how to fix it. */ - if (rctx->chip_class == R600 && max_sample > 0) { + if (rctx->b.chip_class == R600 && max_sample > 0) { texture->dirty_level_mask = 0; return; } - if (rctx->family == CHIP_RV610 || rctx->family == CHIP_RV630 || - rctx->family == CHIP_RV620 || rctx->family == CHIP_RV635) + if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || + rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) depth = 0.0f; else depth = 1.0f; @@ -227,12 +227,12 @@ static void r600_blit_decompress_depth_in_place(struct r600_context *rctx, surf_tmpl.u.tex.first_layer = layer; surf_tmpl.u.tex.last_layer = layer; - zsurf = rctx->context.create_surface(&rctx->context, &texture->resource.b.b, &surf_tmpl); + zsurf = rctx->b.b.create_surface(&rctx->b.b, &texture->resource.b.b, &surf_tmpl); - r600_blitter_begin(&rctx->context, R600_DECOMPRESS); + r600_blitter_begin(&rctx->b.b, R600_DECOMPRESS); util_blitter_custom_depth_stencil(rctx->blitter, zsurf, NULL, ~0, rctx->custom_dsa_flush, 1.0f); - r600_blitter_end(&rctx->context); + r600_blitter_end(&rctx->b.b); pipe_surface_reference(&zsurf, NULL); } @@ -267,13 +267,13 @@ void r600_decompress_depth_textures(struct r600_context *rctx, tex = (struct r600_texture *)view->texture; assert(tex->is_depth && !tex->is_flushing_texture); - if (rctx->chip_class >= EVERGREEN || + if (rctx->b.chip_class >= EVERGREEN || r600_can_read_depth(tex)) { r600_blit_decompress_depth_in_place(rctx, tex, view->u.tex.first_level, view->u.tex.last_level, 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level)); } else { - r600_blit_decompress_depth(&rctx->context, tex, NULL, + r600_blit_decompress_depth(&rctx->b.b, tex, NULL, view->u.tex.first_level, view->u.tex.last_level, 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level), 0, u_max_sample(&tex->resource.b.b)); @@ -343,7 +343,7 @@ void r600_decompress_color_textures(struct r600_context *rctx, tex = (struct r600_texture *)view->texture; assert(tex->cmask_size && tex->fmask_size); - r600_blit_decompress_color(&rctx->context, tex, + r600_blit_decompress_color(&rctx->b.b, tex, view->u.tex.first_level, view->u.tex.last_level, 0, util_max_layer(&tex->resource.b.b, view->u.tex.first_level)); } @@ -362,7 +362,7 @@ static bool r600_decompress_subresource(struct pipe_context *ctx, struct r600_texture *rtex = (struct r600_texture*)tex; if (rtex->is_depth && !rtex->is_flushing_texture) { - if (rctx->chip_class >= EVERGREEN || + if (rctx->b.chip_class >= EVERGREEN || r600_can_read_depth(rtex)) { r600_blit_decompress_depth_in_place(rctx, rtex, level, level, @@ -438,7 +438,7 @@ static bool can_fast_clear_color(struct pipe_context *ctx) struct pipe_framebuffer_state *fb = &rctx->framebuffer.state; int i; - if (rctx->chip_class < EVERGREEN) { + if (rctx->b.chip_class < EVERGREEN) { return false; } @@ -589,7 +589,7 @@ static void r600_clear_buffer(struct pipe_context *ctx, struct pipe_resource *ds uint32_t v = value; if (rctx->screen->has_cp_dma && - rctx->chip_class >= EVERGREEN && + rctx->b.chip_class >= EVERGREEN && offset % 4 == 0 && size % 4 == 0) { uint32_t clear_value = v | (v << 8) | (v << 16) | (v << 24); @@ -750,7 +750,7 @@ static void r600_resource_copy_region(struct pipe_context *ctx, dst_view = r600_create_surface_custom(ctx, dst, &dst_templ, dst_width, dst_height); - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) 
{ src_view = evergreen_create_sampler_view_custom(ctx, src, &src_templ, src_width0, src_height0); } else { @@ -817,7 +817,7 @@ static void r600_msaa_color_resolve(struct pipe_context *ctx, struct pipe_resource *tmp, templ; struct pipe_blit_info blit; unsigned sample_mask = - rctx->chip_class == CAYMAN ? ~0 : + rctx->b.chip_class == CAYMAN ? ~0 : ((1ull << MAX2(1, info->src.resource->nr_samples)) - 1); assert(info->src.level == 0); @@ -902,9 +902,9 @@ static void r600_blit(struct pipe_context *ctx, void r600_init_blit_functions(struct r600_context *rctx) { - rctx->context.clear = r600_clear; - rctx->context.clear_render_target = r600_clear_render_target; - rctx->context.clear_depth_stencil = r600_clear_depth_stencil; - rctx->context.resource_copy_region = r600_resource_copy_region; - rctx->context.blit = r600_blit; + rctx->b.b.clear = r600_clear; + rctx->b.b.clear_render_target = r600_clear_render_target; + rctx->b.b.clear_depth_stencil = r600_clear_depth_stencil; + rctx->b.b.resource_copy_region = r600_resource_copy_region; + rctx->b.b.blit = r600_blit; } diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c index a977b02ac4e..88281b07106 100644 --- a/src/gallium/drivers/r600/r600_buffer.c +++ b/src/gallium/drivers/r600/r600_buffer.c @@ -113,7 +113,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, /* Check if mapping this buffer would cause waiting for the GPU. */ if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) || - rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) { + rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) { unsigned i, mask; /* Discard the buffer. */ @@ -135,13 +135,13 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, } } /* Streamout buffers. */ - for (i = 0; i < rctx->streamout.num_targets; i++) { - if (rctx->streamout.targets[i]->b.buffer == &rbuffer->b.b) { - if (rctx->streamout.begin_emitted) { - r600_emit_streamout_end(rctx); + for (i = 0; i < rctx->b.streamout.num_targets; i++) { + if (rctx->b.streamout.targets[i]->b.buffer == &rbuffer->b.b) { + if (rctx->b.streamout.begin_emitted) { + r600_emit_streamout_end(&rctx->b); } - rctx->streamout.append_bitmask = rctx->streamout.enabled_mask; - r600_streamout_buffers_dirty(rctx); + rctx->b.streamout.append_bitmask = rctx->b.streamout.enabled_mask; + r600_streamout_buffers_dirty(&rctx->b); } } /* Constant buffers. */ @@ -159,7 +159,7 @@ static void *r600_buffer_transfer_map(struct pipe_context *ctx, /* Check if mapping this buffer would cause waiting for the GPU. */ if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) || - rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) { + rctx->b.ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) { /* Do a wait-free write-only transfer using a temporary buffer. */ unsigned offset; struct r600_resource *staging = NULL; @@ -203,8 +203,8 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe, doffset = transfer->box.x; soffset = rtransfer->offset + transfer->box.x % R600_MAP_BUFFER_ALIGNMENT; /* Copy the staging buffer into the original one. 
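The hunk below gates that copy on the async DMA ring: it is only taken when the ring exists and the size and both offsets are dword-aligned; otherwise the driver falls back to an ordinary buffer copy (not shown in this hunk). A rough sketch of the gate, using the fields visible below (the helper name is hypothetical, illustrative only):

    static bool use_dma_for_unmap(struct r600_context *rctx, unsigned size,
                                  unsigned doffset, unsigned soffset)
    {
            // async DMA needs a live ring and dword alignment everywhere
            return rctx->b.rings.dma.cs &&
                   !(size % 4) && !(doffset % 4) && !(soffset % 4);
    }
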
*/ - if (rctx->rings.dma.cs && !(size % 4) && !(doffset % 4) && !(soffset % 4)) { - if (rctx->screen->chip_class >= EVERGREEN) { + if (rctx->b.rings.dma.cs && !(size % 4) && !(doffset % 4) && !(soffset % 4)) { + if (rctx->screen->b.chip_class >= EVERGREEN) { evergreen_dma_copy(rctx, dst, src, doffset, soffset, size); } else { r600_dma_copy(rctx, dst, src, doffset, soffset, size); @@ -269,21 +269,21 @@ bool r600_init_resource(struct r600_screen *rscreen, break; } - res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, + res->buf = rscreen->b.ws->buffer_create(rscreen->b.ws, size, alignment, use_reusable_pool, initial_domain); if (!res->buf) { return false; } - res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf); + res->cs_buf = rscreen->b.ws->buffer_get_cs_handle(res->buf); res->domains = domains; util_range_set_empty(&res->valid_buffer_range); if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) { fprintf(stderr, "VM start=0x%llX end=0x%llX | Buffer %u bytes\n", - r600_resource_va(&rscreen->screen, &res->b.b), - r600_resource_va(&rscreen->screen, &res->b.b) + res->buf->size, + r600_resource_va(&rscreen->b.b, &res->b.b), + r600_resource_va(&rscreen->b.b, &res->b.b) + res->buf->size, res->buf->size); } return true; diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c index 97b0f9cb0ef..d985af9879f 100644 --- a/src/gallium/drivers/r600/r600_hw_context.c +++ b/src/gallium/drivers/r600/r600_hw_context.c @@ -32,20 +32,20 @@ /* Get backends mask */ void r600_get_backend_mask(struct r600_context *ctx) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; struct r600_resource *buffer; uint32_t *results; - unsigned num_backends = ctx->screen->info.r600_num_backends; + unsigned num_backends = ctx->screen->b.info.r600_num_backends; unsigned i, mask = 0; uint64_t va; /* if backend_map query is supported by the kernel */ - if (ctx->screen->info.r600_backend_map_valid) { - unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes; - unsigned backend_map = ctx->screen->info.r600_backend_map; + if (ctx->screen->b.info.r600_backend_map_valid) { + unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes; + unsigned backend_map = ctx->screen->b.info.r600_backend_map; unsigned item_width, item_mask; - if (ctx->chip_class >= EVERGREEN) { + if (ctx->b.chip_class >= EVERGREEN) { item_width = 4; item_mask = 0x7; } else { @@ -68,17 +68,17 @@ void r600_get_backend_mask(struct r600_context *ctx) /* create buffer for event data */ buffer = (struct r600_resource*) - pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM, + pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, ctx->max_db*16); if (!buffer) goto err; - va = r600_resource_va(&ctx->screen->screen, (void*)buffer); + va = r600_resource_va(&ctx->screen->b.b, (void*)buffer); /* initialize buffer with zeroes */ results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE); if (results) { memset(results, 0, ctx->max_db * 4 * 4); - ctx->ws->buffer_unmap(buffer->cs_buf); + ctx->b.ws->buffer_unmap(buffer->cs_buf); /* emit EVENT_WRITE for ZPASS_DONE */ cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0); @@ -87,7 +87,7 @@ void r600_get_backend_mask(struct r600_context *ctx) cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF; cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, buffer, RADEON_USAGE_WRITE); + cs->buf[cs->cdw++] = 
r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE); /* analyze results */ results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ); @@ -97,7 +97,7 @@ void r600_get_backend_mask(struct r600_context *ctx) if (results[i*4 + 1]) mask |= (1<<i); } - ctx->ws->buffer_unmap(buffer->cs_buf); + ctx->b.ws->buffer_unmap(buffer->cs_buf); } } @@ -117,18 +117,18 @@ err: void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, boolean count_draw_in) { - if (!ctx->ws->cs_memory_below_limit(ctx->rings.gfx.cs, ctx->vram, ctx->gtt)) { - ctx->gtt = 0; - ctx->vram = 0; - ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC); + if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) { + ctx->b.gtt = 0; + ctx->b.vram = 0; + ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC); return; } /* all will be accounted for once the relocations are emitted */ - ctx->gtt = 0; - ctx->vram = 0; + ctx->b.gtt = 0; + ctx->b.vram = 0; /* The number of dwords we already used in the CS so far. */ - num_dw += ctx->rings.gfx.cs->cdw; + num_dw += ctx->b.rings.gfx.cs->cdw; if (count_draw_in) { unsigned i; @@ -154,8 +154,8 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, num_dw += ctx->num_cs_dw_nontimer_queries_suspend; /* Count in streamout_end at the end of CS. */ - if (ctx->streamout.begin_emitted) { - num_dw += ctx->streamout.num_dw_for_end; + if (ctx->b.streamout.begin_emitted) { + num_dw += ctx->b.streamout.num_dw_for_end; } /* Count in render_condition(NULL) at the end of CS. */ @@ -164,7 +164,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, } /* SX_MISC */ - if (ctx->chip_class <= R700) { + if (ctx->b.chip_class <= R700) { num_dw += 3; } @@ -176,48 +176,48 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw, /* Flush if there's not enough space. 
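Callers in this patch reserve a worst-case dword budget up front and only then write packets straight into the CS; r600_context_emit_fence below does this with a flat 10-dword budget, and r600_cp_dma_copy_buffer adds R600_MAX_FLUSH_CS_DWORDS on top when flushes are pending. A minimal usage sketch of the pattern (budget value illustrative):

    r600_need_cs_space(rctx, 10, FALSE);    // may flush the gfx ring first
    radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));  // emitting is now guaranteed to fit
    radeon_emit(cs, reloc);
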
*/ if (num_dw > RADEON_MAX_CMDBUF_DWORDS) { - ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC); + ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC); } } void r600_flush_emit(struct r600_context *rctx) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; unsigned cp_coher_cntl = 0; unsigned wait_until = 0; - if (!rctx->flags) { + if (!rctx->b.flags) { return; } - if (rctx->flags & R600_CONTEXT_WAIT_3D_IDLE) { + if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) { wait_until |= S_008040_WAIT_3D_IDLE(1); } - if (rctx->flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) { + if (rctx->b.flags & R600_CONTEXT_WAIT_CP_DMA_IDLE) { wait_until |= S_008040_WAIT_CP_DMA_IDLE(1); } if (wait_until) { /* Use of WAIT_UNTIL is deprecated on Cayman+ */ - if (rctx->family >= CHIP_CAYMAN) { + if (rctx->b.family >= CHIP_CAYMAN) { /* emit a PS partial flush on Cayman/TN */ - rctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH; + rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH; } } - if (rctx->flags & R600_CONTEXT_PS_PARTIAL_FLUSH) { + if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) { cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4); } - if (rctx->chip_class >= R700 && - (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) { + if (rctx->b.chip_class >= R700 && + (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) { cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0); } - if (rctx->chip_class >= R700 && - (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) { + if (rctx->b.chip_class >= R700 && + (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) { cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0); @@ -230,27 +230,27 @@ void r600_flush_emit(struct r600_context *rctx) cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1); } - if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) { + if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV) { cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0); } - if (rctx->flags & R600_CONTEXT_INV_CONST_CACHE) { + if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) { cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1); } - if (rctx->flags & R600_CONTEXT_INV_VERTEX_CACHE) { + if (rctx->b.flags & R600_CONTEXT_INV_VERTEX_CACHE) { cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : S_0085F0_TC_ACTION_ENA(1); } - if (rctx->flags & R600_CONTEXT_INV_TEX_CACHE) { + if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) { cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1); } /* Don't use the DB CP COHER logic on r6xx. * There are hw bugs. */ - if (rctx->chip_class >= R700 && - (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB)) { + if (rctx->b.chip_class >= R700 && + (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB)) { cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1) | S_0085F0_SMX_ACTION_ENA(1); @@ -259,8 +259,8 @@ void r600_flush_emit(struct r600_context *rctx) /* Don't use the CB CP COHER logic on r6xx. * There are hw bugs. 
*/ - if (rctx->chip_class >= R700 && - (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_CB)) { + if (rctx->b.chip_class >= R700 && + (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB)) { cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) | S_0085F0_CB1_DEST_BASE_ENA(1) | @@ -271,14 +271,14 @@ void r600_flush_emit(struct r600_context *rctx) S_0085F0_CB6_DEST_BASE_ENA(1) | S_0085F0_CB7_DEST_BASE_ENA(1) | S_0085F0_SMX_ACTION_ENA(1); - if (rctx->chip_class >= EVERGREEN) + if (rctx->b.chip_class >= EVERGREEN) cp_coher_cntl |= S_0085F0_CB8_DEST_BASE_ENA(1) | S_0085F0_CB9_DEST_BASE_ENA(1) | S_0085F0_CB10_DEST_BASE_ENA(1) | S_0085F0_CB11_DEST_BASE_ENA(1); } - if (rctx->flags & R600_CONTEXT_STREAMOUT_FLUSH) { + if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) { cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) | S_0085F0_SO1_DEST_BASE_ENA(1) | S_0085F0_SO2_DEST_BASE_ENA(1) | @@ -296,22 +296,22 @@ void r600_flush_emit(struct r600_context *rctx) if (wait_until) { /* Use of WAIT_UNTIL is deprecated on Cayman+ */ - if (rctx->family < CHIP_CAYMAN) { + if (rctx->b.family < CHIP_CAYMAN) { /* wait for things to settle */ r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until); } } /* everything is properly flushed */ - rctx->flags = 0; + rctx->b.flags = 0; } void r600_context_flush(struct r600_context *ctx, unsigned flags) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; ctx->nontimer_queries_suspended = false; - ctx->streamout.suspended = false; + ctx->b.streamout.suspended = false; /* suspend queries */ if (ctx->num_cs_dw_nontimer_queries_suspend) { @@ -319,15 +319,15 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags) ctx->nontimer_queries_suspended = true; } - if (ctx->streamout.begin_emitted) { - r600_emit_streamout_end(ctx); - ctx->streamout.suspended = true; + if (ctx->b.streamout.begin_emitted) { + r600_emit_streamout_end(&ctx->b); + ctx->b.streamout.suspended = true; } /* flush is needed to avoid lockups on some chips with user fences * this will also flush the framebuffer cache */ - ctx->flags |= R600_CONTEXT_FLUSH_AND_INV | + ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV_DB | R600_CONTEXT_FLUSH_AND_INV_CB_META | @@ -338,7 +338,7 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags) r600_flush_emit(ctx); /* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */ - if (ctx->chip_class <= R700) { + if (ctx->b.chip_class <= R700) { r600_write_context_reg(cs, R_028350_SX_MISC, 0); } @@ -348,19 +348,19 @@ void r600_context_flush(struct r600_context *ctx, unsigned flags) } /* Flush the CS. */ - ctx->ws->cs_flush(ctx->rings.gfx.cs, flags, ctx->screen->cs_count++); + ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++); } void r600_begin_new_cs(struct r600_context *ctx) { unsigned shader; - ctx->flags = 0; - ctx->gtt = 0; - ctx->vram = 0; + ctx->b.flags = 0; + ctx->b.gtt = 0; + ctx->b.vram = 0; /* Begin a new CS. */ - r600_emit_command_buffer(ctx->rings.gfx.cs, &ctx->start_cs_cmd); + r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd); /* Re-emit states. 
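A fresh CS starts empty, so every piece of bound state has to be sent again; the block below achieves that by flagging each atom dirty so the next draw re-emits it. Reduced to its core, the idea is (a sketch only; the driver flags each atom explicitly, and R600_NUM_ATOMS is assumed here):

    for (i = 0; i < R600_NUM_ATOMS; i++)  // loop bound assumed
            if (ctx->atoms[i])
                    ctx->atoms[i]->dirty = true;
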
*/ ctx->alphatest_state.atom.dirty = true; @@ -389,7 +389,7 @@ void r600_begin_new_cs(struct r600_context *ctx) if (ctx->rasterizer_state.cso) ctx->rasterizer_state.atom.dirty = true; - if (ctx->chip_class <= R700) { + if (ctx->b.chip_class <= R700) { ctx->seamless_cube_map.atom.dirty = true; } @@ -410,9 +410,9 @@ void r600_begin_new_cs(struct r600_context *ctx) r600_sampler_states_dirty(ctx, &samplers->states); } - if (ctx->streamout.suspended) { - ctx->streamout.append_bitmask = ctx->streamout.enabled_mask; - r600_streamout_buffers_dirty(ctx); + if (ctx->b.streamout.suspended) { + ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask; + r600_streamout_buffers_dirty(&ctx->b); } /* resume queries */ @@ -424,21 +424,21 @@ void r600_begin_new_cs(struct r600_context *ctx) ctx->last_primitive_type = -1; ctx->last_start_instance = -1; - ctx->initial_gfx_cs_size = ctx->rings.gfx.cs->cdw; + ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw; } void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; uint64_t va; r600_need_cs_space(ctx, 10, FALSE); - va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo); + va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo); va = va + (offset << 2); /* Use of WAIT_UNTIL is deprecated on Cayman+ */ - if (ctx->family >= CHIP_CAYMAN) { + if (ctx->b.family >= CHIP_CAYMAN) { cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4); } else { @@ -453,168 +453,7 @@ void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fen cs->buf[cs->cdw++] = value; /* DATA_LO */ cs->buf[cs->cdw++] = 0; /* DATA_HI */ cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, fence_bo, RADEON_USAGE_WRITE); -} - -static void r600_flush_vgt_streamout(struct r600_context *ctx) -{ - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; - - r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0); - - cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0); - cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0); - - cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0); - cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */ - cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2; /* register */ - cs->buf[cs->cdw++] = 0; - cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */ - cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */ - cs->buf[cs->cdw++] = 4; /* poll interval */ -} - -static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit) -{ - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; - - if (buffer_enable_bit) { - r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1)); - r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit); - } else { - r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0)); - } -} - -void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom) -{ - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; - struct r600_so_target **t = ctx->streamout.targets; - unsigned *stride_in_dw = ctx->vs_shader->so.stride; - unsigned i, update_flags = 0; - uint64_t va; - - if (ctx->chip_class >= EVERGREEN) { - evergreen_flush_vgt_streamout(ctx); - 
evergreen_set_streamout_enable(ctx, ctx->streamout.enabled_mask); - } else { - r600_flush_vgt_streamout(ctx); - r600_set_streamout_enable(ctx, ctx->streamout.enabled_mask); - } - - for (i = 0; i < ctx->streamout.num_targets; i++) { - if (t[i]) { - t[i]->stride_in_dw = stride_in_dw[i]; - t[i]->so_index = i; - va = r600_resource_va(&ctx->screen->screen, - (void*)t[i]->b.buffer); - - update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i); - - r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3); - r600_write_value(cs, (t[i]->b.buffer_offset + - t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */ - r600_write_value(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */ - r600_write_value(cs, va >> 8); /* BUFFER_BASE */ - - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = - r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer), - RADEON_USAGE_WRITE); - - /* R7xx requires this packet after updating BUFFER_BASE. - * Without this, R7xx locks up. */ - if (ctx->family >= CHIP_RS780 && ctx->family <= CHIP_RV740) { - cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0); - cs->buf[cs->cdw++] = i; - cs->buf[cs->cdw++] = va >> 8; - - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = - r600_context_bo_reloc(ctx, &ctx->rings.gfx, r600_resource(t[i]->b.buffer), - RADEON_USAGE_WRITE); - } - - if (ctx->streamout.append_bitmask & (1 << i)) { - va = r600_resource_va(&ctx->screen->screen, - (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset; - /* Append. */ - cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0); - cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) | - STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */ - cs->buf[cs->cdw++] = 0; /* unused */ - cs->buf[cs->cdw++] = 0; /* unused */ - cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */ - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */ - - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = - r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size, - RADEON_USAGE_READ); - } else { - /* Start from the beginning. 
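That is, the starting offset comes from the packet itself rather than being read back from the buf_filled_size buffer; the control words in the two branches differ only in their offset source:

    STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)     // append after previous filled size
    STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)  // restart at buffer_offset
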
*/ - cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0); - cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) | - STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */ - cs->buf[cs->cdw++] = 0; /* unused */ - cs->buf[cs->cdw++] = 0; /* unused */ - cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */ - cs->buf[cs->cdw++] = 0; /* unused */ - } - } - } - - if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) { - cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0); - cs->buf[cs->cdw++] = update_flags; - } - ctx->streamout.begin_emitted = true; -} - -void r600_emit_streamout_end(struct r600_context *ctx) -{ - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; - struct r600_so_target **t = ctx->streamout.targets; - unsigned i; - uint64_t va; - - if (ctx->chip_class >= EVERGREEN) { - evergreen_flush_vgt_streamout(ctx); - } else { - r600_flush_vgt_streamout(ctx); - } - - for (i = 0; i < ctx->streamout.num_targets; i++) { - if (t[i]) { - va = r600_resource_va(&ctx->screen->screen, - (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset; - cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0); - cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) | - STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) | - STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */ - cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* dst address lo */ - cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */ - cs->buf[cs->cdw++] = 0; /* unused */ - cs->buf[cs->cdw++] = 0; /* unused */ - - cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = - r600_context_bo_reloc(ctx, &ctx->rings.gfx, t[i]->buf_filled_size, - RADEON_USAGE_WRITE); - } - } - - if (ctx->chip_class >= EVERGREEN) { - ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH; - evergreen_set_streamout_enable(ctx, 0); - } else { - if (ctx->chip_class >= R700) { - ctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH; - } - r600_set_streamout_enable(ctx, 0); - } - ctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; - ctx->streamout.begin_emitted = false; + cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE); } /* The max number of bytes to copy per packet. */ @@ -625,18 +464,18 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx, struct pipe_resource *src, uint64_t src_offset, unsigned size) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; assert(size); assert(rctx->screen->has_cp_dma); - dst_offset += r600_resource_va(&rctx->screen->screen, dst); - src_offset += r600_resource_va(&rctx->screen->screen, src); + dst_offset += r600_resource_va(&rctx->screen->b.b, dst); + src_offset += r600_resource_va(&rctx->screen->b.b, src); /* Flush the caches where the resources are bound. */ r600_flag_resource_cache_flush(rctx, src); r600_flag_resource_cache_flush(rctx, dst); - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; /* There are differences between R700 and EG in CP DMA, * but we only use the common bits here. */ @@ -645,10 +484,10 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx, unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT); unsigned src_reloc, dst_reloc; - r600_need_cs_space(rctx, 10 + (rctx->flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE); + r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE); /* Flush the caches for the first copy only. 
*/ - if (rctx->flags) { + if (rctx->b.flags) { r600_flush_emit(rctx); } @@ -658,20 +497,20 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx, } /* This must be done after r600_need_cs_space. */ - src_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ); - dst_reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE); + src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ); + dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE); - r600_write_value(cs, PKT3(PKT3_CP_DMA, 4, 0)); - r600_write_value(cs, src_offset); /* SRC_ADDR_LO [31:0] */ - r600_write_value(cs, sync | ((src_offset >> 32) & 0xff)); /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */ - r600_write_value(cs, dst_offset); /* DST_ADDR_LO [31:0] */ - r600_write_value(cs, (dst_offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */ - r600_write_value(cs, byte_count); /* COMMAND [29:22] | BYTE_COUNT [20:0] */ + radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0)); + radeon_emit(cs, src_offset); /* SRC_ADDR_LO [31:0] */ + radeon_emit(cs, sync | ((src_offset >> 32) & 0xff)); /* CP_SYNC [31] | SRC_ADDR_HI [7:0] */ + radeon_emit(cs, dst_offset); /* DST_ADDR_LO [31:0] */ + radeon_emit(cs, (dst_offset >> 32) & 0xff); /* DST_ADDR_HI [7:0] */ + radeon_emit(cs, byte_count); /* COMMAND [29:22] | BYTE_COUNT [20:0] */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, src_reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, dst_reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, src_reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, dst_reloc); size -= byte_count; src_offset += byte_count; @@ -689,10 +528,10 @@ void r600_cp_dma_copy_buffer(struct r600_context *rctx, void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw) { /* The number of dwords we already used in the DMA so far. */ - num_dw += ctx->rings.dma.cs->cdw; + num_dw += ctx->b.rings.dma.cs->cdw; /* Flush if there's not enough space. */ if (num_dw > RADEON_MAX_CMDBUF_DWORDS) { - ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC); + ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC); } } @@ -703,13 +542,13 @@ void r600_dma_copy(struct r600_context *rctx, uint64_t src_offset, uint64_t size) { - struct radeon_winsys_cs *cs = rctx->rings.dma.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs; unsigned i, ncopy, csize, shift; struct r600_resource *rdst = (struct r600_resource*)dst; struct r600_resource *rsrc = (struct r600_resource*)src; /* make sure that only one dma ring is active */ - rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); + rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); size >>= 2; shift = 2; @@ -719,8 +558,8 @@ void r600_dma_copy(struct r600_context *rctx, for (i = 0; i < ncopy; i++) { csize = size < 0xffff ? 
size : 0xffff; /* emit reloc before writing cs so that cs is always in consistent state */ - r600_context_bo_reloc(rctx, &rctx->rings.dma, rsrc, RADEON_USAGE_READ); - r600_context_bo_reloc(rctx, &rctx->rings.dma, rdst, RADEON_USAGE_WRITE); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE); cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize); cs->buf[cs->cdw++] = dst_offset & 0xfffffffc; cs->buf[cs->cdw++] = src_offset & 0xfffffffc; @@ -746,7 +585,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, while (mask) { uint32_t i = u_bit_scan(&mask); if (rctx->vertex_buffer_state.vb[i].buffer == res) { - rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE; + rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE; } } @@ -755,7 +594,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, while (mask) { uint32_t i = u_bit_scan(&mask); if (rctx->cs_vertex_buffer_state.vb[i].buffer == res) { - rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE; + rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE; } } @@ -768,7 +607,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, while (mask) { unsigned i = u_bit_scan(&mask); if (state->cb[i].buffer == res) { - rctx->flags |= R600_CONTEXT_INV_CONST_CACHE; + rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE; shader = PIPE_SHADER_TYPES; /* break the outer loop */ break; @@ -784,7 +623,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, while (mask) { uint32_t i = u_bit_scan(&mask); if (&state->views[i]->tex_resource->b.b == res) { - rctx->flags |= R600_CONTEXT_INV_TEX_CACHE; + rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; shader = PIPE_SHADER_TYPES; /* break the outer loop */ break; @@ -794,9 +633,9 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, /* Check streamout buffers. */ int i; - for (i = 0; i < rctx->streamout.num_targets; i++) { - if (rctx->streamout.targets[i]->b.buffer == res) { - rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH | + for (i = 0; i < rctx->b.streamout.num_targets; i++) { + if (rctx->b.streamout.targets[i]->b.buffer == res) { + rctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH | R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_WAIT_3D_IDLE; break; @@ -810,12 +649,12 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, struct r600_texture *tex = (struct r600_texture*)rctx->framebuffer.state.cbufs[i]->texture; - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB | + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_WAIT_3D_IDLE; if (tex->cmask_size || tex->fmask_size) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; } break; } @@ -824,7 +663,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, /* Check a depth buffer. 
*/ if (rctx->framebuffer.state.zsbuf) { if (rctx->framebuffer.state.zsbuf->texture == res) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB | + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB | R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_WAIT_3D_IDLE; } @@ -832,7 +671,7 @@ void r600_flag_resource_cache_flush(struct r600_context *rctx, struct r600_texture *tex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture; if (tex && tex->htile && &tex->htile->b.b == res) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META | + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META | R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_WAIT_3D_IDLE; } diff --git a/src/gallium/drivers/r600/r600_isa.c b/src/gallium/drivers/r600/r600_isa.c index 4c6ccacdd07..81544ca3dd1 100644 --- a/src/gallium/drivers/r600/r600_isa.c +++ b/src/gallium/drivers/r600/r600_isa.c @@ -30,8 +30,8 @@ int r600_isa_init(struct r600_context *ctx, struct r600_isa *isa) { unsigned i; - assert(ctx->chip_class >= R600 && ctx->chip_class <= CAYMAN); - isa->hw_class = ctx->chip_class - R600; + assert(ctx->b.chip_class >= R600 && ctx->b.chip_class <= CAYMAN); + isa->hw_class = ctx->b.chip_class - R600; /* reverse lookup maps are required for bytecode parsing */ diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c index edd50f00b6d..b4b8c886f64 100644 --- a/src/gallium/drivers/r600/r600_pipe.c +++ b/src/gallium/drivers/r600/r600_pipe.c @@ -92,7 +92,7 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx) if (!rscreen->fences.bo) { /* Create the shared buffer object */ rscreen->fences.bo = (struct r600_resource*) - pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM, + pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 4096); if (!rscreen->fences.bo) { R600_ERR("r600: failed to create bo for fence objects\n"); @@ -148,10 +148,10 @@ static struct r600_fence *r600_create_fence(struct r600_context *rctx) /* Create a dummy BO so that fence_finish without a timeout can sleep waiting for completion */ fence->sleep_bo = (struct r600_resource*) - pipe_buffer_create(&rctx->screen->screen, PIPE_BIND_CUSTOM, + pipe_buffer_create(&rctx->screen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 1); /* Add the fence as a dummy relocation. */ - r600_context_bo_reloc(rctx, &rctx->rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, fence->sleep_bo, RADEON_USAGE_READWRITE); out: pipe_mutex_unlock(rscreen->fences.mutex); @@ -165,10 +165,10 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags) unsigned render_cond_mode = 0; boolean render_cond_cond = FALSE; - if (rctx->rings.gfx.cs->cdw == rctx->initial_gfx_cs_size) + if (rctx->b.rings.gfx.cs->cdw == rctx->initial_gfx_cs_size) return; - rctx->rings.gfx.flushing = true; + rctx->b.rings.gfx.flushing = true; /* Disable render condition. */ if (rctx->current_render_cond) { render_cond = rctx->current_render_cond; @@ -178,7 +178,7 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags) } r600_context_flush(rctx, flags); - rctx->rings.gfx.flushing = false; + rctx->b.rings.gfx.flushing = false; r600_begin_new_cs(rctx); /* Re-enable render condition. 
*/ @@ -186,7 +186,7 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags) ctx->render_condition(ctx, render_cond, render_cond_cond, render_cond_mode); } - rctx->initial_gfx_cs_size = rctx->rings.gfx.cs->cdw; + rctx->initial_gfx_cs_size = rctx->b.rings.gfx.cs->cdw; } static void r600_flush_from_st(struct pipe_context *ctx, @@ -202,10 +202,10 @@ static void r600_flush_from_st(struct pipe_context *ctx, *rfence = r600_create_fence(rctx); } /* flush gfx & dma ring, order does not matter as only one can be live */ - if (rctx->rings.dma.cs) { - rctx->rings.dma.flush(rctx, fflags); + if (rctx->b.rings.dma.cs) { + rctx->b.rings.dma.flush(rctx, fflags); } - rctx->rings.gfx.flush(rctx, fflags); + rctx->b.rings.gfx.flush(rctx, fflags); } static void r600_flush_gfx_ring(void *ctx, unsigned flags) @@ -216,7 +216,7 @@ static void r600_flush_gfx_ring(void *ctx, unsigned flags) static void r600_flush_dma_ring(void *ctx, unsigned flags) { struct r600_context *rctx = (struct r600_context *)ctx; - struct radeon_winsys_cs *cs = rctx->rings.dma.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs; unsigned padding_dw, i; if (!cs->cdw) { @@ -231,20 +231,20 @@ static void r600_flush_dma_ring(void *ctx, unsigned flags) } } - rctx->rings.dma.flushing = true; - rctx->ws->cs_flush(cs, flags, 0); - rctx->rings.dma.flushing = false; + rctx->b.rings.dma.flushing = true; + rctx->b.ws->cs_flush(cs, flags, 0); + rctx->b.rings.dma.flushing = false; } boolean r600_rings_is_buffer_referenced(struct r600_context *ctx, struct radeon_winsys_cs_handle *buf, enum radeon_bo_usage usage) { - if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, buf, usage)) { + if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, buf, usage)) { return TRUE; } - if (ctx->rings.dma.cs) { - if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, buf, usage)) { + if (ctx->b.rings.dma.cs) { + if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, buf, usage)) { return TRUE; } } @@ -260,7 +260,7 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx, bool sync_flush = TRUE; if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) { - return ctx->ws->buffer_map(resource->cs_buf, NULL, usage); + return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage); } if (!(usage & PIPE_TRANSFER_WRITE)) { @@ -271,15 +271,15 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx, flags |= RADEON_FLUSH_ASYNC; } - if (ctx->ws->cs_is_buffer_referenced(ctx->rings.gfx.cs, resource->cs_buf, rusage) && ctx->rings.gfx.cs->cdw) { - ctx->rings.gfx.flush(ctx, flags); + if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.gfx.cs, resource->cs_buf, rusage) && ctx->b.rings.gfx.cs->cdw) { + ctx->b.rings.gfx.flush(ctx, flags); if (usage & PIPE_TRANSFER_DONTBLOCK) { return NULL; } } - if (ctx->rings.dma.cs) { - if (ctx->ws->cs_is_buffer_referenced(ctx->rings.dma.cs, resource->cs_buf, rusage) && ctx->rings.dma.cs->cdw) { - ctx->rings.dma.flush(ctx, flags); + if (ctx->b.rings.dma.cs) { + if (ctx->b.ws->cs_is_buffer_referenced(ctx->b.rings.dma.cs, resource->cs_buf, rusage) && ctx->b.rings.dma.cs->cdw) { + ctx->b.rings.dma.flush(ctx, flags); if (usage & PIPE_TRANSFER_DONTBLOCK) { return NULL; } @@ -287,34 +287,34 @@ void *r600_buffer_mmap_sync_with_rings(struct r600_context *ctx, } if (usage & PIPE_TRANSFER_DONTBLOCK) { - if (ctx->ws->buffer_is_busy(resource->buf, rusage)) { + if (ctx->b.ws->buffer_is_busy(resource->buf, rusage)) { return NULL; } } if (sync_flush) { /* Try to avoid busy-waiting in radeon_bo_wait. 
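Once both rings are flushed and synced, the map that follows cannot stall on the GPU. The call pattern used elsewhere in this patch (see r600_get_backend_mask above; NULL is returned on DONTBLOCK contention) looks like this:

    uint32_t *results = r600_buffer_mmap_sync_with_rings(ctx, buffer,
                                                         PIPE_TRANSFER_WRITE);
    if (results) {
            memset(results, 0, ctx->max_db * 4 * 4);
            ctx->b.ws->buffer_unmap(buffer->cs_buf);
    }
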
*/ - ctx->ws->cs_sync_flush(ctx->rings.gfx.cs); - if (ctx->rings.dma.cs) { - ctx->ws->cs_sync_flush(ctx->rings.dma.cs); + ctx->b.ws->cs_sync_flush(ctx->b.rings.gfx.cs); + if (ctx->b.rings.dma.cs) { + ctx->b.ws->cs_sync_flush(ctx->b.rings.dma.cs); } } /* at this point everything is synchronized */ - return ctx->ws->buffer_map(resource->cs_buf, NULL, usage); + return ctx->b.ws->buffer_map(resource->cs_buf, NULL, usage); } static void r600_flush_from_winsys(void *ctx, unsigned flags) { struct r600_context *rctx = (struct r600_context *)ctx; - rctx->rings.gfx.flush(rctx, flags); + rctx->b.rings.gfx.flush(rctx, flags); } static void r600_flush_dma_from_winsys(void *ctx, unsigned flags) { struct r600_context *rctx = (struct r600_context *)ctx; - rctx->rings.dma.flush(rctx, flags); + rctx->b.rings.dma.flush(rctx, flags); } static void r600_destroy_context(struct pipe_context *context) @@ -329,16 +329,16 @@ static void r600_destroy_context(struct pipe_context *context) pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL); if (rctx->dummy_pixel_shader) { - rctx->context.delete_fs_state(&rctx->context, rctx->dummy_pixel_shader); + rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader); } if (rctx->custom_dsa_flush) { - rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush); + rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush); } if (rctx->custom_blend_resolve) { - rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_resolve); + rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve); } if (rctx->custom_blend_decompress) { - rctx->context.delete_blend_state(&rctx->context, rctx->custom_blend_decompress); + rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress); } util_unreference_framebuffer_state(&rctx->framebuffer.state); @@ -348,9 +348,6 @@ static void r600_destroy_context(struct pipe_context *context) if (rctx->uploader) { u_upload_destroy(rctx->uploader); } - if (rctx->allocator_so_filled_size) { - u_suballocator_destroy(rctx->allocator_so_filled_size); - } if (rctx->allocator_fetch_shader) { u_suballocator_destroy(rctx->allocator_fetch_shader); } @@ -358,13 +355,14 @@ static void r600_destroy_context(struct pipe_context *context) r600_release_command_buffer(&rctx->start_cs_cmd); - if (rctx->rings.gfx.cs) { - rctx->ws->cs_destroy(rctx->rings.gfx.cs); + if (rctx->b.rings.gfx.cs) { + rctx->b.ws->cs_destroy(rctx->b.rings.gfx.cs); } - if (rctx->rings.dma.cs) { - rctx->ws->cs_destroy(rctx->rings.dma.cs); + if (rctx->b.rings.dma.cs) { + rctx->b.ws->cs_destroy(rctx->b.rings.dma.cs); } + r600_common_context_cleanup(&rctx->b); FREE(rctx); } @@ -380,17 +378,16 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void sizeof(struct r600_transfer), 64, UTIL_SLAB_SINGLETHREADED); - rctx->context.screen = screen; - rctx->context.priv = priv; - rctx->context.destroy = r600_destroy_context; - rctx->context.flush = r600_flush_from_st; + rctx->b.b.screen = screen; + rctx->b.b.priv = priv; + rctx->b.b.destroy = r600_destroy_context; + rctx->b.b.flush = r600_flush_from_st; + + if (!r600_common_context_init(&rctx->b, &rscreen->b)) + goto fail; - /* Easy accessing of screen/winsys. 
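These per-context copies go away because r600_common_context_init() above now owns them; presumably the common init fills in the shared fields roughly like so (a sketch under that assumption; the common code itself is not part of this hunk):

    rctx->b.ws = rscreen->b.ws;                  // winsys handle
    rctx->b.family = rscreen->b.family;          // chip family
    rctx->b.chip_class = rscreen->b.chip_class;  // R600/R700/EVERGREEN/CAYMAN
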
*/ rctx->screen = rscreen; - rctx->ws = rscreen->ws; - rctx->family = rscreen->family; - rctx->chip_class = rscreen->chip_class; - rctx->keep_tiling_flags = rscreen->info.drm_minor >= 12; + rctx->keep_tiling_flags = rscreen->b.info.drm_minor >= 12; LIST_INITHEAD(&rctx->active_nontimer_queries); @@ -399,31 +396,31 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void r600_init_context_resource_functions(rctx); r600_init_surface_functions(rctx); - if (rscreen->info.has_uvd) { - rctx->context.create_video_codec = r600_uvd_create_decoder; - rctx->context.create_video_buffer = r600_video_buffer_create; + if (rscreen->b.info.has_uvd) { + rctx->b.b.create_video_codec = r600_uvd_create_decoder; + rctx->b.b.create_video_buffer = r600_video_buffer_create; } else { - rctx->context.create_video_codec = vl_create_decoder; - rctx->context.create_video_buffer = vl_video_buffer_create; + rctx->b.b.create_video_codec = vl_create_decoder; + rctx->b.b.create_video_buffer = vl_video_buffer_create; } r600_init_common_state_functions(rctx); - switch (rctx->chip_class) { + switch (rctx->b.chip_class) { case R600: case R700: r600_init_state_functions(rctx); r600_init_atom_start_cs(rctx); rctx->max_db = 4; rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx); - rctx->custom_blend_resolve = rctx->chip_class == R700 ? r700_create_resolve_blend(rctx) + rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx) : r600_create_resolve_blend(rctx); rctx->custom_blend_decompress = r600_create_decompress_blend(rctx); - rctx->has_vertex_cache = !(rctx->family == CHIP_RV610 || - rctx->family == CHIP_RV620 || - rctx->family == CHIP_RS780 || - rctx->family == CHIP_RS880 || - rctx->family == CHIP_RV710); + rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 || + rctx->b.family == CHIP_RV620 || + rctx->b.family == CHIP_RS780 || + rctx->b.family == CHIP_RS880 || + rctx->b.family == CHIP_RV710); break; case EVERGREEN: case CAYMAN: @@ -434,57 +431,52 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx); rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx); rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx); - rctx->has_vertex_cache = !(rctx->family == CHIP_CEDAR || - rctx->family == CHIP_PALM || - rctx->family == CHIP_SUMO || - rctx->family == CHIP_SUMO2 || - rctx->family == CHIP_CAICOS || - rctx->family == CHIP_CAYMAN || - rctx->family == CHIP_ARUBA); + rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR || + rctx->b.family == CHIP_PALM || + rctx->b.family == CHIP_SUMO || + rctx->b.family == CHIP_SUMO2 || + rctx->b.family == CHIP_CAICOS || + rctx->b.family == CHIP_CAYMAN || + rctx->b.family == CHIP_ARUBA); break; default: - R600_ERR("Unsupported chip class %d.\n", rctx->chip_class); + R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class); goto fail; } if (rscreen->trace_bo) { - rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX, rscreen->trace_bo->cs_buf); + rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, rscreen->trace_bo->cs_buf); } else { - rctx->rings.gfx.cs = rctx->ws->cs_create(rctx->ws, RING_GFX, NULL); + rctx->b.rings.gfx.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_GFX, NULL); } - rctx->rings.gfx.flush = r600_flush_gfx_ring; - rctx->ws->cs_set_flush_callback(rctx->rings.gfx.cs, r600_flush_from_winsys, rctx); - rctx->rings.gfx.flushing = false; - - rctx->rings.dma.cs = NULL; - if 
(rscreen->info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) { - rctx->rings.dma.cs = rctx->ws->cs_create(rctx->ws, RING_DMA, NULL); - rctx->rings.dma.flush = r600_flush_dma_ring; - rctx->ws->cs_set_flush_callback(rctx->rings.dma.cs, r600_flush_dma_from_winsys, rctx); - rctx->rings.dma.flushing = false; + rctx->b.rings.gfx.flush = r600_flush_gfx_ring; + rctx->b.ws->cs_set_flush_callback(rctx->b.rings.gfx.cs, r600_flush_from_winsys, rctx); + rctx->b.rings.gfx.flushing = false; + + rctx->b.rings.dma.cs = NULL; + if (rscreen->b.info.r600_has_dma && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) { + rctx->b.rings.dma.cs = rctx->b.ws->cs_create(rctx->b.ws, RING_DMA, NULL); + rctx->b.rings.dma.flush = r600_flush_dma_ring; + rctx->b.ws->cs_set_flush_callback(rctx->b.rings.dma.cs, r600_flush_dma_from_winsys, rctx); + rctx->b.rings.dma.flushing = false; } - rctx->uploader = u_upload_create(&rctx->context, 1024 * 1024, 256, + rctx->uploader = u_upload_create(&rctx->b.b, 1024 * 1024, 256, PIPE_BIND_INDEX_BUFFER | PIPE_BIND_CONSTANT_BUFFER); if (!rctx->uploader) goto fail; - rctx->allocator_fetch_shader = u_suballocator_create(&rctx->context, 64 * 1024, 256, + rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256, 0, PIPE_USAGE_STATIC, FALSE); if (!rctx->allocator_fetch_shader) goto fail; - rctx->allocator_so_filled_size = u_suballocator_create(&rctx->context, 4096, 4, - 0, PIPE_USAGE_STATIC, TRUE); - if (!rctx->allocator_so_filled_size) - goto fail; - rctx->isa = calloc(1, sizeof(struct r600_isa)); if (!rctx->isa || r600_isa_init(rctx, rctx->isa)) goto fail; - rctx->blitter = util_blitter_create(&rctx->context); + rctx->blitter = util_blitter_create(&rctx->b.b); if (rctx->blitter == NULL) goto fail; util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa); @@ -494,15 +486,15 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void r600_get_backend_mask(rctx); /* this emits commands and must be last */ rctx->dummy_pixel_shader = - util_make_fragment_cloneinput_shader(&rctx->context, 0, + util_make_fragment_cloneinput_shader(&rctx->b.b, 0, TGSI_SEMANTIC_GENERIC, TGSI_INTERPOLATE_CONSTANT); - rctx->context.bind_fs_state(&rctx->context, rctx->dummy_pixel_shader); + rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader); - return &rctx->context; + return &rctx->b.b; fail: - r600_destroy_context(&rctx->context); + r600_destroy_context(&rctx->b.b); return NULL; } @@ -550,13 +542,13 @@ static const char* r600_get_name(struct pipe_screen* pscreen) { struct r600_screen *rscreen = (struct r600_screen *)pscreen; - return r600_get_family_name(rscreen->family); + return r600_get_family_name(rscreen->b.family); } static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) { struct r600_screen *rscreen = (struct r600_screen *)pscreen; - enum radeon_family family = rscreen->family; + enum radeon_family family = rscreen->b.family; switch (param) { /* Supported features (boolean caps). */ @@ -601,7 +593,7 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) return 0; case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE: - return MIN2(rscreen->info.vram_size, 0xFFFFFFFF); + return MIN2(rscreen->b.info.vram_size, 0xFFFFFFFF); case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT: return R600_MAP_BUFFER_ALIGNMENT; @@ -654,7 +646,7 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) else return 14; case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS: - return rscreen->info.drm_minor >= 9 ? 
+ return rscreen->b.info.drm_minor >= 9 ? (family >= CHIP_CEDAR ? 16384 : 8192) : 0; case PIPE_CAP_MAX_COMBINED_SAMPLERS: return 32; @@ -666,10 +658,10 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) /* Timer queries, present when the clock frequency is non zero. */ case PIPE_CAP_QUERY_TIME_ELAPSED: - return rscreen->info.r600_clock_crystal_freq != 0; + return rscreen->b.info.r600_clock_crystal_freq != 0; case PIPE_CAP_QUERY_TIMESTAMP: - return rscreen->info.drm_minor >= 20 && - rscreen->info.r600_clock_crystal_freq != 0; + return rscreen->b.info.drm_minor >= 20 && + rscreen->b.info.r600_clock_crystal_freq != 0; case PIPE_CAP_MIN_TEXEL_OFFSET: return -8; @@ -689,7 +681,7 @@ static float r600_get_paramf(struct pipe_screen* pscreen, enum pipe_capf param) { struct r600_screen *rscreen = (struct r600_screen *)pscreen; - enum radeon_family family = rscreen->family; + enum radeon_family family = rscreen->b.family; switch (param) { case PIPE_CAPF_MAX_LINE_WIDTH: @@ -879,7 +871,7 @@ static int r600_get_compute_param(struct pipe_screen *screen, //TODO: select these params by asic switch (param) { case PIPE_COMPUTE_CAP_IR_TARGET: { - const char *gpu = r600_llvm_gpu_string(rscreen->family); + const char *gpu = r600_llvm_gpu_string(rscreen->b.family); if (ret) { sprintf(ret, "%s-r600--", gpu); } @@ -986,16 +978,16 @@ static void r600_destroy_screen(struct pipe_screen* pscreen) FREE(entry); } - rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf); + rscreen->b.ws->buffer_unmap(rscreen->fences.bo->cs_buf); pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL); } if (rscreen->trace_bo) { - rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf); + rscreen->b.ws->buffer_unmap(rscreen->trace_bo->cs_buf); pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL); } pipe_mutex_destroy(rscreen->fences.mutex); - rscreen->ws->destroy(rscreen->ws); + rscreen->b.ws->destroy(rscreen->b.ws); FREE(rscreen); } @@ -1045,13 +1037,13 @@ static boolean r600_fence_finish(struct pipe_screen *pscreen, while (rscreen->fences.data[rfence->index] == 0) { /* Special-case infinite timeout - wait for the dummy BO to become idle */ if (timeout == PIPE_TIMEOUT_INFINITE) { - rscreen->ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE); + rscreen->b.ws->buffer_wait(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE); break; } /* The dummy BO will be busy until the CS including the fence has completed, or * the GPU is reset. Don't bother continuing to spin when the BO is idle. 
*/ - if (!rscreen->ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE)) + if (!rscreen->b.ws->buffer_is_busy(rfence->sleep_bo->buf, RADEON_USAGE_READWRITE)) break; if (++spins % 256) @@ -1161,10 +1153,10 @@ static int evergreen_interpret_tiling(struct r600_screen *rscreen, uint32_t tili static int r600_init_tiling(struct r600_screen *rscreen) { - uint32_t tiling_config = rscreen->info.r600_tiling_config; + uint32_t tiling_config = rscreen->b.info.r600_tiling_config; /* set default group bytes, overridden by tiling info ioctl */ - if (rscreen->chip_class <= R700) { + if (rscreen->b.chip_class <= R700) { rscreen->tiling_info.group_bytes = 256; } else { rscreen->tiling_info.group_bytes = 512; @@ -1173,7 +1165,7 @@ static int r600_init_tiling(struct r600_screen *rscreen) if (!tiling_config) return 0; - if (rscreen->chip_class <= R700) { + if (rscreen->b.chip_class <= R700) { return r600_interpret_tiling(rscreen, tiling_config); } else { return evergreen_interpret_tiling(rscreen, tiling_config); @@ -1184,8 +1176,8 @@ static uint64_t r600_get_timestamp(struct pipe_screen *screen) { struct r600_screen *rscreen = (struct r600_screen*)screen; - return 1000000 * rscreen->ws->query_value(rscreen->ws, RADEON_TIMESTAMP) / - rscreen->info.r600_clock_crystal_freq; + return 1000000 * rscreen->b.ws->query_value(rscreen->b.ws, RADEON_TIMESTAMP) / + rscreen->b.info.r600_clock_crystal_freq; } static int r600_get_driver_query_info(struct pipe_screen *screen, @@ -1195,8 +1187,8 @@ static int r600_get_driver_query_info(struct pipe_screen *screen, struct r600_screen *rscreen = (struct r600_screen*)screen; struct pipe_driver_query_info list[] = { {"draw-calls", R600_QUERY_DRAW_CALLS, 0}, - {"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->info.vram_size, TRUE}, - {"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->info.gart_size, TRUE}, + {"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->b.info.vram_size, TRUE}, + {"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->b.info.gart_size, TRUE}, {"buffer-wait-time", R600_QUERY_BUFFER_WAIT_TIME, 0, FALSE} }; @@ -1218,8 +1210,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) return NULL; } - rscreen->ws = ws; - ws->query_info(ws, &rscreen->info); + r600_common_screen_init(&rscreen->b, ws); rscreen->debug_flags = debug_get_flags_option("R600_DEBUG", debug_options, 0); if (debug_get_bool_option("R600_DEBUG_COMPUTE", FALSE)) @@ -1232,30 +1223,28 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) rscreen->debug_flags |= DBG_NO_LLVM; if (debug_get_bool_option("R600_PRINT_TEXDEPTH", FALSE)) rscreen->debug_flags |= DBG_TEX_DEPTH; - rscreen->family = rscreen->info.family; - rscreen->chip_class = rscreen->info.chip_class; - if (rscreen->family == CHIP_UNKNOWN) { - fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->info.pci_id); + if (rscreen->b.family == CHIP_UNKNOWN) { + fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id); FREE(rscreen); return NULL; } /* Figure out streamout kernel support. 
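 * Each chip class (and, on R600, each family group) gained streamout support in a different DRM minor version; the switch below encodes the required minimums.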
*/ - switch (rscreen->chip_class) { + switch (rscreen->b.chip_class) { case R600: - if (rscreen->family < CHIP_RS780) { - rscreen->has_streamout = rscreen->info.drm_minor >= 14; + if (rscreen->b.family < CHIP_RS780) { + rscreen->has_streamout = rscreen->b.info.drm_minor >= 14; } else { - rscreen->has_streamout = rscreen->info.drm_minor >= 23; + rscreen->has_streamout = rscreen->b.info.drm_minor >= 23; } break; case R700: - rscreen->has_streamout = rscreen->info.drm_minor >= 17; + rscreen->has_streamout = rscreen->b.info.drm_minor >= 17; break; case EVERGREEN: case CAYMAN: - rscreen->has_streamout = rscreen->info.drm_minor >= 14; + rscreen->has_streamout = rscreen->b.info.drm_minor >= 14; break; default: rscreen->has_streamout = FALSE; @@ -1263,18 +1252,18 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) } /* MSAA support. */ - switch (rscreen->chip_class) { + switch (rscreen->b.chip_class) { case R600: case R700: - rscreen->has_msaa = rscreen->info.drm_minor >= 22; + rscreen->has_msaa = rscreen->b.info.drm_minor >= 22; rscreen->has_compressed_msaa_texturing = false; break; case EVERGREEN: - rscreen->has_msaa = rscreen->info.drm_minor >= 19; - rscreen->has_compressed_msaa_texturing = rscreen->info.drm_minor >= 24; + rscreen->has_msaa = rscreen->b.info.drm_minor >= 19; + rscreen->has_compressed_msaa_texturing = rscreen->b.info.drm_minor >= 24; break; case CAYMAN: - rscreen->has_msaa = rscreen->info.drm_minor >= 19; + rscreen->has_msaa = rscreen->b.info.drm_minor >= 19; rscreen->has_compressed_msaa_texturing = true; break; default: @@ -1282,7 +1271,7 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) rscreen->has_compressed_msaa_texturing = false; } - rscreen->has_cp_dma = rscreen->info.drm_minor >= 27 && + rscreen->has_cp_dma = rscreen->b.info.drm_minor >= 27 && !(rscreen->debug_flags & DBG_NO_CP_DMA); if (r600_init_tiling(rscreen)) { @@ -1290,37 +1279,37 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) return NULL; } - rscreen->screen.destroy = r600_destroy_screen; - rscreen->screen.get_name = r600_get_name; - rscreen->screen.get_vendor = r600_get_vendor; - rscreen->screen.get_param = r600_get_param; - rscreen->screen.get_shader_param = r600_get_shader_param; - rscreen->screen.get_paramf = r600_get_paramf; - rscreen->screen.get_compute_param = r600_get_compute_param; - rscreen->screen.get_timestamp = r600_get_timestamp; - - if (rscreen->chip_class >= EVERGREEN) { - rscreen->screen.is_format_supported = evergreen_is_format_supported; + rscreen->b.b.destroy = r600_destroy_screen; + rscreen->b.b.get_name = r600_get_name; + rscreen->b.b.get_vendor = r600_get_vendor; + rscreen->b.b.get_param = r600_get_param; + rscreen->b.b.get_shader_param = r600_get_shader_param; + rscreen->b.b.get_paramf = r600_get_paramf; + rscreen->b.b.get_compute_param = r600_get_compute_param; + rscreen->b.b.get_timestamp = r600_get_timestamp; + + if (rscreen->b.chip_class >= EVERGREEN) { + rscreen->b.b.is_format_supported = evergreen_is_format_supported; rscreen->dma_blit = &evergreen_dma_blit; } else { - rscreen->screen.is_format_supported = r600_is_format_supported; + rscreen->b.b.is_format_supported = r600_is_format_supported; rscreen->dma_blit = &r600_dma_blit; } - rscreen->screen.context_create = r600_create_context; - rscreen->screen.fence_reference = r600_fence_reference; - rscreen->screen.fence_signalled = r600_fence_signalled; - rscreen->screen.fence_finish = r600_fence_finish; - rscreen->screen.get_driver_query_info = r600_get_driver_query_info; - - if 
(rscreen->info.has_uvd) { - rscreen->screen.get_video_param = r600_uvd_get_video_param; - rscreen->screen.is_video_format_supported = ruvd_is_format_supported; + rscreen->b.b.context_create = r600_create_context; + rscreen->b.b.fence_reference = r600_fence_reference; + rscreen->b.b.fence_signalled = r600_fence_signalled; + rscreen->b.b.fence_finish = r600_fence_finish; + rscreen->b.b.get_driver_query_info = r600_get_driver_query_info; + + if (rscreen->b.info.has_uvd) { + rscreen->b.b.get_video_param = r600_uvd_get_video_param; + rscreen->b.b.is_video_format_supported = ruvd_is_format_supported; } else { - rscreen->screen.get_video_param = r600_get_video_param; - rscreen->screen.is_video_format_supported = vl_video_buffer_is_format_supported; + rscreen->b.b.get_video_param = r600_get_video_param; + rscreen->b.b.is_video_format_supported = vl_video_buffer_is_format_supported; } - r600_init_screen_resource_functions(&rscreen->screen); + r600_init_screen_resource_functions(&rscreen->b.b); util_format_s3tc_init(); @@ -1334,20 +1323,20 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) rscreen->global_pool = compute_memory_pool_new(rscreen); rscreen->cs_count = 0; - if (rscreen->info.drm_minor >= 28 && (rscreen->debug_flags & DBG_TRACE_CS)) { - rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->screen, + if (rscreen->b.info.drm_minor >= 28 && (rscreen->debug_flags & DBG_TRACE_CS)) { + rscreen->trace_bo = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, 4096); if (rscreen->trace_bo) { - rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL, + rscreen->trace_ptr = rscreen->b.ws->buffer_map(rscreen->trace_bo->cs_buf, NULL, PIPE_TRANSFER_UNSYNCHRONIZED); } } /* Create the auxiliary context. */ pipe_mutex_init(rscreen->aux_context_lock); - rscreen->aux_context = rscreen->screen.context_create(&rscreen->screen, NULL); + rscreen->aux_context = rscreen->b.b.context_create(&rscreen->b.b, NULL); #if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */ struct pipe_resource templ = {}; @@ -1381,5 +1370,5 @@ struct pipe_screen *r600_screen_create(struct radeon_winsys *ws) } #endif - return &rscreen->screen; + return &rscreen->b.b; } diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h index 668a846d910..2ba0251dccf 100644 --- a/src/gallium/drivers/r600/r600_pipe.h +++ b/src/gallium/drivers/r600/r600_pipe.h @@ -26,14 +26,18 @@ #ifndef R600_PIPE_H #define R600_PIPE_H +#include "../radeon/r600_pipe_common.h" +#include "../radeon/r600_cs.h" + +#include "r600_llvm.h" +#include "r600_public.h" +#include "r600_resource.h" + #include "util/u_blitter.h" #include "util/u_slab.h" #include "util/u_suballoc.h" #include "util/u_double_list.h" #include "util/u_transfer.h" -#include "r600_llvm.h" -#include "r600_public.h" -#include "r600_resource.h" #define R600_NUM_ATOMS 41 @@ -64,22 +68,6 @@ #define R600_ERR(fmt, args...) 
\ fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args) -/* read caches */ -#define R600_CONTEXT_INV_VERTEX_CACHE (1 << 0) -#define R600_CONTEXT_INV_TEX_CACHE (1 << 1) -#define R600_CONTEXT_INV_CONST_CACHE (1 << 2) -/* read-write caches */ -#define R600_CONTEXT_STREAMOUT_FLUSH (1 << 8) -#define R600_CONTEXT_FLUSH_AND_INV (1 << 9) -#define R600_CONTEXT_FLUSH_AND_INV_CB_META (1 << 10) -#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 11) -#define R600_CONTEXT_FLUSH_AND_INV_DB (1 << 12) -#define R600_CONTEXT_FLUSH_AND_INV_CB (1 << 13) -/* engine synchronization */ -#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 16) -#define R600_CONTEXT_WAIT_3D_IDLE (1 << 17) -#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 18) - #define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0) #define R600_QUERY_REQUESTED_VRAM (PIPE_QUERY_DRIVER_SPECIFIC + 1) #define R600_QUERY_REQUESTED_GTT (PIPE_QUERY_DRIVER_SPECIFIC + 2) @@ -89,16 +77,6 @@ struct r600_context; struct r600_bytecode; struct r600_shader_key; -/* This encapsulates a state or an operation which can emitted into the GPU - * command stream. It's not limited to states only, it can be used for anything - * that wants to write commands into the CS (e.g. cache flushes). */ -struct r600_atom { - void (*emit)(struct r600_context *ctx, struct r600_atom *state); - unsigned id; - unsigned num_dw; - bool dirty; -}; - /* This is an atom containing GPU commands that never change. * This is supposed to be copied directly into the CS. */ struct r600_command_buffer { @@ -265,12 +243,8 @@ struct r600_tiling_info { }; struct r600_screen { - struct pipe_screen screen; - struct radeon_winsys *ws; + struct r600_common_screen b; unsigned debug_flags; - unsigned family; - enum chip_class chip_class; - struct radeon_info info; bool has_streamout; bool has_msaa; bool has_cp_dma; @@ -486,56 +460,16 @@ struct r600_query { uint64_t end_result; }; -struct r600_so_target { - struct pipe_stream_output_target b; - - /* The buffer where BUFFER_FILLED_SIZE is stored. */ - struct r600_resource *buf_filled_size; - unsigned buf_filled_size_offset; - - unsigned stride_in_dw; - unsigned so_index; -}; - -struct r600_streamout { - struct r600_atom begin_atom; - bool begin_emitted; - unsigned num_dw_for_end; - - unsigned enabled_mask; - unsigned num_targets; - struct r600_so_target *targets[PIPE_MAX_SO_BUFFERS]; - - unsigned append_bitmask; - bool suspended; -}; - -struct r600_ring { - struct radeon_winsys_cs *cs; - bool flushing; - void (*flush)(void *ctx, unsigned flags); -}; - -struct r600_rings { - struct r600_ring gfx; - struct r600_ring dma; -}; - struct r600_context { - struct pipe_context context; + struct r600_common_context b; struct r600_screen *screen; - struct radeon_winsys *ws; - struct r600_rings rings; struct blitter_context *blitter; struct u_upload_mgr *uploader; - struct u_suballocator *allocator_so_filled_size; struct u_suballocator *allocator_fetch_shader; struct util_slab_mempool pool_transfers; unsigned initial_gfx_cs_size; /* Hardware info. */ - enum radeon_family family; - enum chip_class chip_class; boolean has_vertex_cache; boolean keep_tiling_flags; unsigned default_ps_gprs, default_vs_gprs; @@ -543,10 +477,6 @@ struct r600_context { unsigned backend_mask; unsigned max_db; /* for OQ */ - /* current unaccounted memory usage */ - uint64_t vram; - uint64_t gtt; - /* Miscellaneous state objects. 
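 * (driver-internal DSA and blend states used by the depth decompression and MSAA resolve blits)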
*/ void *custom_dsa_flush; void *custom_blend_resolve; @@ -599,10 +529,8 @@ struct r600_context { struct r600_vertexbuf_state vertex_buffer_state; /** Vertex buffers for compute shaders */ struct r600_vertexbuf_state cs_vertex_buffer_state; - struct r600_streamout streamout; /* Additional context states. */ - unsigned flags; unsigned compute_cb_target_mask; struct r600_pipe_shader_selector *ps_shader; struct r600_pipe_shader_selector *vs_shader; @@ -654,7 +582,7 @@ void r600_trace_emit(struct r600_context *rctx); static INLINE void r600_emit_atom(struct r600_context *rctx, struct r600_atom *atom) { - atom->emit(rctx, atom); + atom->emit(&rctx->b, atom); atom->dirty = false; if (rctx->screen->trace_bo) { r600_trace_emit(rctx); @@ -831,16 +759,12 @@ boolean r600_dma_blit(struct pipe_context *ctx, struct pipe_resource *src, unsigned src_level, const struct pipe_box *src_box); -void r600_emit_streamout_begin(struct r600_context *ctx, struct r600_atom *atom); -void r600_emit_streamout_end(struct r600_context *ctx); void r600_flag_resource_cache_flush(struct r600_context *rctx, struct pipe_resource *res); /* * evergreen_hw_context.c */ -void evergreen_flush_vgt_streamout(struct r600_context *ctx); -void evergreen_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit); void evergreen_dma_copy(struct r600_context *rctx, struct pipe_resource *dst, struct pipe_resource *src, @@ -874,7 +798,6 @@ void r600_sampler_views_dirty(struct r600_context *rctx, void r600_sampler_states_dirty(struct r600_context *rctx, struct r600_sampler_states *state); void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state); -void r600_streamout_buffers_dirty(struct r600_context *rctx); void r600_draw_rectangle(struct blitter_context *blitter, int x1, int y1, int x2, int y2, float depth, enum blitter_attrib_type type, const union pipe_color_union *attrib); @@ -1021,60 +944,6 @@ static INLINE void eg_store_loop_const(struct r600_command_buffer *cb, unsigned void r600_init_command_buffer(struct r600_command_buffer *cb, unsigned num_dw); void r600_release_command_buffer(struct r600_command_buffer *cb); -/* - * Helpers for emitting state into a command stream directly. 
- */ -static INLINE unsigned r600_context_bo_reloc(struct r600_context *ctx, - struct r600_ring *ring, - struct r600_resource *rbo, - enum radeon_bo_usage usage) -{ - assert(usage); - /* make sure that all previous ring use are flushed so everything - * look serialized from driver pov - */ - if (!ring->flushing) { - if (ring == &ctx->rings.gfx) { - if (ctx->rings.dma.cs) { - /* flush dma ring */ - ctx->rings.dma.flush(ctx, RADEON_FLUSH_ASYNC); - } - } else { - /* flush gfx ring */ - ctx->rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC); - } - } - return ctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage, rbo->domains) * 4; -} - -static INLINE void r600_write_value(struct radeon_winsys_cs *cs, unsigned value) -{ - cs->buf[cs->cdw++] = value; -} - -static INLINE void r600_write_array(struct radeon_winsys_cs *cs, unsigned num, unsigned *ptr) -{ - assert(cs->cdw+num <= RADEON_MAX_CMDBUF_DWORDS); - memcpy(&cs->buf[cs->cdw], ptr, num * sizeof(ptr[0])); - cs->cdw += num; -} - -static INLINE void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) -{ - assert(reg < R600_CONTEXT_REG_OFFSET); - assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS); - cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, num, 0); - cs->buf[cs->cdw++] = (reg - R600_CONFIG_REG_OFFSET) >> 2; -} - -static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) -{ - assert(reg >= R600_CONTEXT_REG_OFFSET && reg < R600_CTL_CONST_OFFSET); - assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS); - cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, num, 0); - cs->buf[cs->cdw++] = (reg - R600_CONTEXT_REG_OFFSET) >> 2; -} - static INLINE void r600_write_compute_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) { r600_write_context_reg_seq(cs, reg, num); @@ -1090,22 +959,10 @@ static INLINE void r600_write_ctl_const_seq(struct radeon_winsys_cs *cs, unsigne cs->buf[cs->cdw++] = (reg - R600_CTL_CONST_OFFSET) >> 2; } -static INLINE void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) -{ - r600_write_config_reg_seq(cs, reg, 1); - r600_write_value(cs, value); -} - -static INLINE void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) -{ - r600_write_context_reg_seq(cs, reg, 1); - r600_write_value(cs, value); -} - static INLINE void r600_write_compute_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) { r600_write_compute_context_reg_seq(cs, reg, 1); - r600_write_value(cs, value); + radeon_emit(cs, value); } static INLINE void r600_write_context_reg_flag(struct radeon_winsys_cs *cs, unsigned reg, unsigned value, unsigned flag) @@ -1115,12 +972,12 @@ static INLINE void r600_write_context_reg_flag(struct radeon_winsys_cs *cs, unsi } else { r600_write_context_reg(cs, reg, value); } - } + static INLINE void r600_write_ctl_const(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) { r600_write_ctl_const_seq(cs, reg, 1); - r600_write_value(cs, value); + radeon_emit(cs, value); } /* @@ -1148,36 +1005,4 @@ static INLINE unsigned r600_pack_float_12p4(float x) x >= 4096 ? 
0xffff : x * 16; } -static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, struct pipe_resource *resource) -{ - struct r600_screen *rscreen = (struct r600_screen*)screen; - struct r600_resource *rresource = (struct r600_resource*)resource; - - return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf); -} - -static INLINE void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_resource *rr = (struct r600_resource *)r; - - if (r == NULL) { - return; - } - - /* - * The idea is to compute a gross estimate of memory requirement of - * each draw call. After each draw call, memory will be precisely - * accounted. So the uncertainty is only on the current draw call. - * In practice this gave very good estimate (+/- 10% of the target - * memory limit). - */ - if (rr->domains & RADEON_DOMAIN_GTT) { - rctx->gtt += rr->buf->size; - } - if (rr->domains & RADEON_DOMAIN_VRAM) { - rctx->vram += rr->buf->size; - } -} - #endif diff --git a/src/gallium/drivers/r600/r600_query.c b/src/gallium/drivers/r600/r600_query.c index f77e1a8f52d..457c9ad3903 100644 --- a/src/gallium/drivers/r600/r600_query.c +++ b/src/gallium/drivers/r600/r600_query.c @@ -56,7 +56,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns * usage pattern. */ struct r600_resource *buf = (struct r600_resource*) - pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM, + pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STAGING, buf_size); switch (type) { @@ -76,7 +76,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns } results += 4 * ctx->max_db; } - ctx->ws->buffer_unmap(buf->cs_buf); + ctx->b.ws->buffer_unmap(buf->cs_buf); break; case PIPE_QUERY_TIME_ELAPSED: case PIPE_QUERY_TIMESTAMP: @@ -88,7 +88,7 @@ static struct r600_resource *r600_new_query_buffer(struct r600_context *ctx, uns case PIPE_QUERY_PIPELINE_STATISTICS: results = r600_buffer_mmap_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE); memset(results, 0, buf_size); - ctx->ws->buffer_unmap(buf->cs_buf); + ctx->b.ws->buffer_unmap(buf->cs_buf); break; default: assert(0); @@ -117,7 +117,7 @@ static void r600_update_occlusion_query_state(struct r600_context *rctx, static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *query) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; uint64_t va; r600_update_occlusion_query_state(ctx, query->type, 1); @@ -133,7 +133,7 @@ static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *q } /* emit begin query */ - va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf); + va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer.buf); va += query->buffer.results_end; switch (query->type) { @@ -176,7 +176,7 @@ static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *q assert(0); } cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); + cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); if (!r600_is_timer_query(query->type)) { ctx->num_cs_dw_nontimer_queries_suspend += query->num_cs_dw; @@ -185,7 +185,7 @@ static void r600_emit_query_begin(struct r600_context *ctx, struct r600_query *q static void r600_emit_query_end(struct r600_context *ctx, struct 
r600_query *query) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; uint64_t va; /* The queries which need begin already called this in begin_query. */ @@ -193,7 +193,7 @@ static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *que r600_need_cs_space(ctx, query->num_cs_dw, FALSE); } - va = r600_resource_va(&ctx->screen->screen, (void*)query->buffer.buf); + va = r600_resource_va(&ctx->screen->b.b, (void*)query->buffer.buf); /* emit end query */ switch (query->type) { case PIPE_QUERY_OCCLUSION_COUNTER: @@ -242,7 +242,7 @@ static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *que assert(0); } cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); + cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, query->buffer.buf, RADEON_USAGE_WRITE); query->buffer.results_end += query->result_size; @@ -258,7 +258,7 @@ static void r600_emit_query_end(struct r600_context *ctx, struct r600_query *que static void r600_emit_query_predication(struct r600_context *ctx, struct r600_query *query, int operation, bool flag_wait) { - struct radeon_winsys_cs *cs = ctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs; if (operation == PREDICATION_OP_CLEAR) { r600_need_cs_space(ctx, 3, FALSE); @@ -285,14 +285,14 @@ static void r600_emit_query_predication(struct r600_context *ctx, struct r600_qu /* emit predicate packets for all data blocks */ for (qbuf = &query->buffer; qbuf; qbuf = qbuf->previous) { unsigned results_base = 0; - uint64_t va = r600_resource_va(&ctx->screen->screen, &qbuf->buf->b.b); + uint64_t va = r600_resource_va(&ctx->screen->b.b, &qbuf->buf->b.b); while (results_base < qbuf->results_end) { cs->buf[cs->cdw++] = PKT3(PKT3_SET_PREDICATION, 1, 0); cs->buf[cs->cdw++] = (va + results_base) & 0xFFFFFFFFUL; cs->buf[cs->cdw++] = op | (((va + results_base) >> 32UL) & 0xFF); cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, &ctx->rings.gfx, qbuf->buf, RADEON_USAGE_READ); + cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, qbuf->buf, RADEON_USAGE_READ); results_base += query->result_size; /* set CONTINUE bit for all packets except the first */ @@ -338,7 +338,7 @@ static struct pipe_query *r600_create_query(struct pipe_context *ctx, unsigned q break; case PIPE_QUERY_PIPELINE_STATISTICS: /* 11 values on EG, 8 on R600. */ - query->result_size = (rctx->chip_class >= EVERGREEN ? 11 : 8) * 16; + query->result_size = (rctx->b.chip_class >= EVERGREEN ? 11 : 8) * 16; query->num_cs_dw = 8; break; /* Non-GPU queries. */ @@ -402,7 +402,7 @@ static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query) rquery->begin_result = 0; return; case R600_QUERY_BUFFER_WAIT_TIME: - rquery->begin_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS); + rquery->begin_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_BUFFER_WAIT_TIME_NS); return; } @@ -416,7 +416,7 @@ static void r600_begin_query(struct pipe_context *ctx, struct pipe_query *query) /* Obtain a new buffer if the current one can't be mapped without a stall. 
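 * The old buffer may still be referenced by an unflushed CS or still busy on the GPU; switching to a fresh buffer keeps begin_query from blocking.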
*/ if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) || - rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) { + rctx->b.ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) { pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL); rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type); } @@ -442,13 +442,13 @@ static void r600_end_query(struct pipe_context *ctx, struct pipe_query *query) rquery->end_result = rctx->num_draw_calls; return; case R600_QUERY_REQUESTED_VRAM: - rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY); + rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_REQUESTED_VRAM_MEMORY); return; case R600_QUERY_REQUESTED_GTT: - rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY); + rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_REQUESTED_GTT_MEMORY); return; case R600_QUERY_BUFFER_WAIT_TIME: - rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_BUFFER_WAIT_TIME_NS); + rquery->end_result = rctx->b.ws->query_value(rctx->b.ws, RADEON_BUFFER_WAIT_TIME_NS); return; } @@ -571,7 +571,7 @@ static boolean r600_get_query_buffer_result(struct r600_context *ctx, } break; case PIPE_QUERY_PIPELINE_STATISTICS: - if (ctx->chip_class >= EVERGREEN) { + if (ctx->b.chip_class >= EVERGREEN) { while (results_base != qbuf->results_end) { result->pipeline_statistics.ps_invocations += r600_query_read_result(map + results_base, 0, 22, false); @@ -639,7 +639,7 @@ static boolean r600_get_query_buffer_result(struct r600_context *ctx, assert(0); } - ctx->ws->buffer_unmap(qbuf->buf->cs_buf); + ctx->b.ws->buffer_unmap(qbuf->buf->cs_buf); return TRUE; } @@ -662,7 +662,7 @@ static boolean r600_get_query_result(struct pipe_context *ctx, /* Convert the time to expected units. 
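 * The counters tick at r600_clock_crystal_freq, which the kernel reports in kHz, so ticks * 1000000 / freq gives the nanoseconds that pipe queries expect.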
*/ if (rquery->type == PIPE_QUERY_TIME_ELAPSED || rquery->type == PIPE_QUERY_TIMESTAMP) { - result->u64 = (1000000 * result->u64) / rctx->screen->info.r600_clock_crystal_freq; + result->u64 = (1000000 * result->u64) / rctx->screen->b.info.r600_clock_crystal_freq; } return TRUE; } @@ -734,12 +734,12 @@ void r600_resume_nontimer_queries(struct r600_context *ctx) void r600_init_query_functions(struct r600_context *rctx) { - rctx->context.create_query = r600_create_query; - rctx->context.destroy_query = r600_destroy_query; - rctx->context.begin_query = r600_begin_query; - rctx->context.end_query = r600_end_query; - rctx->context.get_query_result = r600_get_query_result; - - if (rctx->screen->info.r600_num_backends > 0) - rctx->context.render_condition = r600_render_condition; + rctx->b.b.create_query = r600_create_query; + rctx->b.b.destroy_query = r600_destroy_query; + rctx->b.b.begin_query = r600_begin_query; + rctx->b.b.end_query = r600_end_query; + rctx->b.b.get_query_result = r600_get_query_result; + + if (rctx->screen->b.info.r600_num_backends > 0) + rctx->b.b.render_condition = r600_render_condition; } diff --git a/src/gallium/drivers/r600/r600_resource.c b/src/gallium/drivers/r600/r600_resource.c index 5962f8ac199..a8fa357b133 100644 --- a/src/gallium/drivers/r600/r600_resource.c +++ b/src/gallium/drivers/r600/r600_resource.c @@ -69,8 +69,8 @@ void r600_init_screen_resource_functions(struct pipe_screen *screen) void r600_init_context_resource_functions(struct r600_context *r600) { - r600->context.transfer_map = u_transfer_map_vtbl; - r600->context.transfer_flush_region = u_default_transfer_flush_region; - r600->context.transfer_unmap = u_transfer_unmap_vtbl; - r600->context.transfer_inline_write = u_default_transfer_inline_write; + r600->b.b.transfer_map = u_transfer_map_vtbl; + r600->b.b.transfer_flush_region = u_default_transfer_flush_region; + r600->b.b.transfer_unmap = u_transfer_unmap_vtbl; + r600->b.b.transfer_inline_write = u_default_transfer_inline_write; } diff --git a/src/gallium/drivers/r600/r600_resource.h b/src/gallium/drivers/r600/r600_resource.h index 4c55f66e50c..92b9cc50b98 100644 --- a/src/gallium/drivers/r600/r600_resource.h +++ b/src/gallium/drivers/r600/r600_resource.h @@ -24,7 +24,7 @@ #define R600_RESOURCE_H #include "../../winsys/radeon/drm/radeon_winsys.h" -#include "util/u_range.h" +#include "../radeon/r600_pipe_common.h" struct r600_screen; @@ -33,27 +33,6 @@ struct r600_screen; #define R600_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1) #define R600_RESOURCE_FLAG_FORCE_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2) -struct r600_resource { - struct u_resource b; - - /* Winsys objects. */ - struct pb_buffer *buf; - struct radeon_winsys_cs_handle *cs_buf; - - /* Resource state. */ - enum radeon_bo_domain domains; - - /* The buffer range which is initialized (with a write transfer, - * streamout, DMA, or as a random access target). The rest of - * the buffer is considered invalid and can be mapped unsynchronized. - * - * This allows unsychronized mapping of a buffer range which hasn't - * been used yet. It's for applications which forget to use - * the unsynchronized map flag and expect the driver to figure it out. 
- */ - struct util_range valid_buffer_range; -}; - struct r600_transfer { struct pipe_transfer transfer; struct r600_resource *staging; @@ -179,11 +158,6 @@ struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen, const struct pipe_resource *base, struct winsys_handle *whandle); -static INLINE struct r600_resource *r600_resource(struct pipe_resource *r) -{ - return (struct r600_resource*)r; -} - bool r600_init_flushed_depth_texture(struct pipe_context *ctx, struct pipe_resource *texture, struct r600_texture **staging); diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c index 9a23f01731a..ce15cd7e5a9 100644 --- a/src/gallium/drivers/r600/r600_shader.c +++ b/src/gallium/drivers/r600/r600_shader.c @@ -199,20 +199,20 @@ int r600_pipe_shader_create(struct pipe_context *ctx, } else { memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr)); } - rctx->ws->buffer_unmap(shader->bo->cs_buf); + rctx->b.ws->buffer_unmap(shader->bo->cs_buf); } /* Build state. */ switch (shader->shader.processor_type) { case TGSI_PROCESSOR_VERTEX: - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) { evergreen_update_vs_state(ctx, shader); } else { r600_update_vs_state(ctx, shader); } break; case TGSI_PROCESSOR_FRAGMENT: - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) { evergreen_update_ps_state(ctx, shader); } else { r600_update_ps_state(ctx, shader); @@ -930,7 +930,7 @@ static int r600_shader_from_tgsi(struct r600_screen *rscreen, ctx.shader = shader; ctx.native_integers = true; - r600_bytecode_init(ctx.bc, rscreen->chip_class, rscreen->family, + r600_bytecode_init(ctx.bc, rscreen->b.chip_class, rscreen->b.family, rscreen->has_compressed_msaa_texturing); ctx.tokens = tokens; tgsi_scan_shader(tokens, &ctx.info); @@ -1133,14 +1133,14 @@ static int r600_shader_from_tgsi(struct r600_screen *rscreen, radeon_llvm_ctx.r600_outputs = ctx.shader->output; radeon_llvm_ctx.color_buffer_count = max_color_exports; radeon_llvm_ctx.chip_class = ctx.bc->chip_class; - radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->chip_class >= EVERGREEN); + radeon_llvm_ctx.fs_color_all = shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN); radeon_llvm_ctx.stream_outputs = &so; radeon_llvm_ctx.clip_vertex = ctx.cv_output; radeon_llvm_ctx.alpha_to_one = key.alpha_to_one; mod = r600_tgsi_llvm(&radeon_llvm_ctx, tokens); ctx.shader->has_txq_cube_array_z_comp = radeon_llvm_ctx.has_txq_cube_array_z_comp; - if (r600_llvm_compile(mod, rscreen->family, ctx.bc, &use_kill, dump)) { + if (r600_llvm_compile(mod, rscreen->b.family, ctx.bc, &use_kill, dump)) { radeon_llvm_dispose(&radeon_llvm_ctx); use_llvm = 0; fprintf(stderr, "R600 LLVM backend failed to compile " @@ -1156,7 +1156,7 @@ static int r600_shader_from_tgsi(struct r600_screen *rscreen, #endif /* End of LLVM backend setup */ - if (shader->fs_write_all && rscreen->chip_class >= EVERGREEN) + if (shader->fs_write_all && rscreen->b.chip_class >= EVERGREEN) shader->nr_ps_max_color_exports = 8; if (!use_llvm) { @@ -1450,7 +1450,7 @@ static int r600_shader_from_tgsi(struct r600_screen *rscreen, output[j].array_base = shader->output[i].sid; output[j].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; shader->nr_ps_color_exports++; - if (shader->fs_write_all && (rscreen->chip_class >= EVERGREEN)) { + if (shader->fs_write_all && (rscreen->b.chip_class >= EVERGREEN)) { for (k = 1; k < max_color_exports; k++) { j++; memset(&output[j], 0, sizeof(struct 
r600_bytecode_output)); diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c index 4590fdddc80..7a7ed7290ff 100644 --- a/src/gallium/drivers/r600/r600_state.c +++ b/src/gallium/drivers/r600/r600_state.c @@ -630,7 +630,7 @@ boolean r600_is_format_supported(struct pipe_screen *screen, return FALSE; /* R11G11B10 is broken on R6xx. */ - if (rscreen->chip_class == R600 && + if (rscreen->b.chip_class == R600 && format == PIPE_FORMAT_R11G11B10_FLOAT) return FALSE; @@ -686,7 +686,7 @@ boolean r600_is_format_supported(struct pipe_screen *screen, static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a; float offset_units = state->offset_units; float offset_scale = state->offset_scale; @@ -703,10 +703,10 @@ static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom } r600_write_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); - r600_write_value(cs, fui(offset_scale)); - r600_write_value(cs, fui(offset_units)); - r600_write_value(cs, fui(offset_scale)); - r600_write_value(cs, fui(offset_units)); + radeon_emit(cs, fui(offset_scale)); + radeon_emit(cs, fui(offset_units)); + radeon_emit(cs, fui(offset_scale)); + radeon_emit(cs, fui(offset_units)); } static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i) @@ -754,7 +754,7 @@ static void *r600_create_blend_state_mode(struct pipe_context *ctx, r600_init_command_buffer(&blend->buffer_no_blend, 20); /* R600 does not support per-MRT blends */ - if (rctx->family > CHIP_R600) + if (rctx->b.family > CHIP_R600) color_control |= S_028808_PER_MRT_BLEND(1); if (state->logicop_enable) { @@ -811,7 +811,7 @@ static void *r600_create_blend_state_mode(struct pipe_context *ctx, r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL, r600_get_blend_control(state, 0)); - if (rctx->family > CHIP_R600) { + if (rctx->b.family > CHIP_R600) { r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8); for (int i = 0; i < 8; i++) { r600_store_value(&blend->buffer, r600_get_blend_control(state, i)); @@ -925,7 +925,7 @@ static void *r600_create_rs_state(struct pipe_context *ctx, sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) | S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) | S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1); - if (rctx->chip_class >= R700) { + if (rctx->b.chip_class >= R700) { sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) | S_028A4C_R700_ZMM_LINE_OFFSET(1) | S_028A4C_R700_VPORT_SCISSOR_ENABLE(state->scissor); @@ -1185,11 +1185,11 @@ r600_create_sampler_view(struct pipe_context *ctx, static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_clip_state *state = &rctx->clip_state.state; r600_write_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4); - r600_write_array(cs, 6*4, (unsigned*)state); + radeon_emit_array(cs, (unsigned*)state, 6*4); } static void r600_set_polygon_stipple(struct pipe_context *ctx, @@ -1199,19 +1199,19 @@ static void r600_set_polygon_stipple(struct pipe_context *ctx, static void r600_emit_scissor_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; 
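/* On R600, a disabled scissor test is emitted as a maximal 8192x8192 window (the else branch below); other chips always emit the current scissor rectangle. */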
struct pipe_scissor_state *state = &rctx->scissor.scissor; - if (rctx->chip_class != R600 || rctx->scissor.enable) { r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2); - r600_write_value(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) | + radeon_emit(cs, S_028240_TL_X(state->minx) | S_028240_TL_Y(state->miny) | S_028240_WINDOW_OFFSET_DISABLE(1)); - r600_write_value(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy)); + radeon_emit(cs, S_028244_BR_X(state->maxx) | S_028244_BR_Y(state->maxy)); } else { r600_write_context_reg_seq(cs, R_028250_PA_SC_VPORT_SCISSOR_0_TL, 2); - r600_write_value(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | + radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1)); - r600_write_value(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); + radeon_emit(cs, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); } } @@ -1224,7 +1224,7 @@ static void r600_set_scissor_states(struct pipe_context *ctx, rctx->scissor.scissor = *state; - if (rctx->chip_class == R600 && !rctx->scissor.enable) + if (rctx->b.chip_class == R600 && !rctx->scissor.enable) return; rctx->scissor.atom.dirty = true; @@ -1247,7 +1247,7 @@ static struct r600_resource *r600_buffer_create_helper(struct r600_screen *rscre buffer.array_size = 1; return (struct r600_resource*) - r600_buffer_create(&rscreen->screen, &buffer, alignment); + r600_buffer_create(&rscreen->b.b, &buffer, alignment); } static void r600_init_color_surface(struct r600_context *rctx, @@ -1266,7 +1266,7 @@ static void r600_init_color_surface(struct r600_context *rctx, bool blend_bypass = 0, blend_clamp = 1; if (rtex->is_depth && !rtex->is_flushing_texture && !r600_can_read_depth(rtex)) { - r600_init_flushed_depth_texture(&rctx->context, surf->base.texture, NULL); + r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL); rtex = rtex->flushed_depth_texture; assert(rtex); } @@ -1354,7 +1354,7 @@ /* EXPORT_NORM is an optimization that can be enabled for better * performance in certain cases */ - if (rctx->chip_class == R600) { + if (rctx->b.chip_class == R600) { /* EXPORT_NORM can be enabled if: * - 11-bit or smaller UNORM/SNORM/SRGB * - BLEND_CLAMP is enabled @@ -1434,9 +1434,9 @@ static void r600_init_color_surface(struct r600_context *rctx, rctx->dummy_cmask = r600_buffer_create_helper(rscreen, cmask.size, cmask.alignment); /* Set the contents to 0xCC.
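 * (0xCC appears to be the pattern marking CMASK blocks as expanded rather than fast-cleared, a safe state for this dummy buffer)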
*/ - ptr = pipe_buffer_map(&rctx->context, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer); + ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer); memset(ptr, 0xCC, cmask.size); - pipe_buffer_unmap(&rctx->context, transfer); + pipe_buffer_unmap(&rctx->b.b, transfer); } pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, &rctx->dummy_cmask->b.b); @@ -1529,7 +1529,7 @@ static void r600_init_depth_surface(struct r600_context *rctx, surf->htile_enabled = 0; /* use htile only for first level */ if (rtex->htile && !level) { - uint64_t va = r600_resource_va(&rctx->screen->screen, &rtex->htile->b.b); + uint64_t va = r600_resource_va(&rctx->screen->b.b, &rtex->htile->b.b); surf->htile_enabled = 1; surf->db_htile_data_base = va >> 8; surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) | @@ -1552,21 +1552,21 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx, unsigned i; if (rctx->framebuffer.state.nr_cbufs) { - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB; - if (rctx->chip_class >= R700 && + if (rctx->b.chip_class >= R700 && rctx->framebuffer.state.cbufs[0]->texture->nr_samples > 1) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META; } } if (rctx->framebuffer.state.zsbuf) { - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | R600_CONTEXT_FLUSH_AND_INV; + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB; rtex = (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture; - if (rctx->chip_class >= R700 && rtex->htile) { - rctx->flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META; + if (rctx->b.chip_class >= R700 && rtex->htile) { + rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META; } } @@ -1591,7 +1591,7 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx, /* Colorbuffers. */ for (i = 0; i < state->nr_cbufs; i++) { /* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. 
*/ - bool force_cmask_fmask = rctx->chip_class == R600 && + bool force_cmask_fmask = rctx->b.chip_class == R600 && rctx->framebuffer.is_msaa_resolve && i == 1; @@ -1675,10 +1675,10 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx, } if (rctx->framebuffer.state.zsbuf) { rctx->framebuffer.atom.num_dw += 18; - } else if (rctx->screen->info.drm_minor >= 18) { + } else if (rctx->screen->b.info.drm_minor >= 18) { rctx->framebuffer.atom.num_dw += 3; } - if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770) { + if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) { rctx->framebuffer.atom.num_dw += 2; } @@ -1750,10 +1750,10 @@ static void r600_get_sample_position(struct pipe_context *ctx, static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; unsigned max_dist = 0; - if (rctx->family == CHIP_R600) { + if (rctx->b.family == CHIP_R600) { switch (nr_samples) { default: nr_samples = 0; @@ -1768,8 +1768,8 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) break; case 8: r600_write_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2); - r600_write_value(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */ - r600_write_value(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */ + radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */ + radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */ max_dist = max_dist_8x; break; } @@ -1777,26 +1777,26 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) switch (nr_samples) { default: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); - r600_write_value(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ - r600_write_value(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ + radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ + radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ nr_samples = 0; break; case 2: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); - r600_write_value(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ - r600_write_value(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ + radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ + radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_2x; break; case 4: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); - r600_write_value(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ - r600_write_value(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ + radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ + radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_4x; break; case 8: r600_write_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); - r600_write_value(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ - r600_write_value(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ + radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ + radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ max_dist = max_dist_8x; break; } @@ -1804,20 +1804,20 @@ static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) 
if (nr_samples > 1) { r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1) | + radeon_emit(cs, S_028C00_LAST_PIXEL(1) | S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */ - r600_write_value(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | + radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */ } else { r600_write_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); - r600_write_value(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ - r600_write_value(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ + radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ + radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ } } static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_framebuffer_state *state = &rctx->framebuffer.state; unsigned nr_cbufs = state->nr_cbufs; struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0]; @@ -1826,94 +1826,94 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a /* Colorbuffers. */ r600_write_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_info); + radeon_emit(cs, cb[i]->cb_color_info); } /* set CB_COLOR1_INFO for possible dual-src blending */ if (i == 1) { - r600_write_value(cs, cb[0]->cb_color_info); + radeon_emit(cs, cb[0]->cb_color_info); i++; } for (; i < 8; i++) { - r600_write_value(cs, 0); + radeon_emit(cs, 0); } if (nr_cbufs) { /* COLOR_BASE */ r600_write_context_reg_seq(cs, R_028040_CB_COLOR0_BASE, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_base); + radeon_emit(cs, cb[i]->cb_color_base); } /* relocations */ for (i = 0; i < nr_cbufs; i++) { - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, (struct r600_resource*)cb[i]->base.texture, RADEON_USAGE_READWRITE); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } r600_write_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_size); + radeon_emit(cs, cb[i]->cb_color_size); } r600_write_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_view); + radeon_emit(cs, cb[i]->cb_color_view); } r600_write_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_mask); + radeon_emit(cs, cb[i]->cb_color_mask); } /* FMASK. */ r600_write_context_reg_seq(cs, R_0280E0_CB_COLOR0_FRAG, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_fmask); + radeon_emit(cs, cb[i]->cb_color_fmask); } /* relocations */ for (i = 0; i < nr_cbufs; i++) { - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, cb[i]->cb_buffer_fmask, RADEON_USAGE_READWRITE); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } /* CMASK. 
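 * On R6xx/R7xx the CB_COLORn_TILE registers hold the CMASK base addresses; one relocation per colorbuffer follows.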
*/ r600_write_context_reg_seq(cs, R_0280C0_CB_COLOR0_TILE, nr_cbufs); for (i = 0; i < nr_cbufs; i++) { - r600_write_value(cs, cb[i]->cb_color_cmask); + radeon_emit(cs, cb[i]->cb_color_cmask); } /* relocations */ for (i = 0; i < nr_cbufs; i++) { - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, cb[i]->cb_buffer_cmask, RADEON_USAGE_READWRITE); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs); } /* SURFACE_BASE_UPDATE */ - if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770 && sbu) { - r600_write_value(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); - r600_write_value(cs, sbu); + if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) { + radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); + radeon_emit(cs, sbu); sbu = 0; } /* Zbuffer. */ if (state->zsbuf) { struct r600_surface *surf = (struct r600_surface*)state->zsbuf; - unsigned reloc = r600_context_bo_reloc(rctx, - &rctx->rings.gfx, + unsigned reloc = r600_context_bo_reloc(&rctx->b, + &rctx->b.rings.gfx, (struct r600_resource*)state->zsbuf->texture, RADEON_USAGE_READWRITE); @@ -1921,36 +1921,36 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a surf->pa_su_poly_offset_db_fmt_cntl); r600_write_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2); - r600_write_value(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */ - r600_write_value(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */ + radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */ + radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */ r600_write_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2); - r600_write_value(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */ - r600_write_value(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */ + radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */ + radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); r600_write_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit); sbu |= SURFACE_BASE_UPDATE_DEPTH; - } else if (rctx->screen->info.drm_minor >= 18) { + } else if (rctx->screen->b.info.drm_minor >= 18) { /* DRM 2.6.18 allows the INVALID format to disable depth/stencil. * Older kernels are out of luck. */ r600_write_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID)); } /* SURFACE_BASE_UPDATE */ - if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770 && sbu) { - r600_write_value(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); - r600_write_value(cs, sbu); + if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) { + radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); + radeon_emit(cs, sbu); sbu = 0; } /* Framebuffer dimensions. 
*/ r600_write_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); - r600_write_value(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | + radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ - r600_write_value(cs, S_028244_BR_X(state->width) | + radeon_emit(cs, S_028244_BR_X(state->width) | S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ if (rctx->framebuffer.is_msaa_resolve) { @@ -1968,17 +1968,17 @@ static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_a static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom; if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) { r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); - if (rctx->chip_class == R600) { - r600_write_value(cs, 0xff); /* R_028238_CB_TARGET_MASK */ - r600_write_value(cs, 0xff); /* R_02823C_CB_SHADER_MASK */ + if (rctx->b.chip_class == R600) { + radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */ + radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */ } else { - r600_write_value(cs, 0xf); /* R_028238_CB_TARGET_MASK */ - r600_write_value(cs, 0xf); /* R_02823C_CB_SHADER_MASK */ + radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */ + radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */ } r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control); } else { @@ -1987,9 +1987,9 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1; r600_write_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); - r600_write_value(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ + radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ /* Always enable the first color output to make sure alpha-test works even without one. */ - r600_write_value(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */ + radeon_emit(cs, 0xf | (multiwrite ? 
fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */ r600_write_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control | S_028808_MULTIWRITE_ENABLE(multiwrite)); @@ -1998,7 +1998,7 @@ static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_db_state *a = (struct r600_db_state*)atom; if (a->rsurf && a->rsurf->htile_enabled) { @@ -2008,7 +2008,7 @@ static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom r600_write_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear)); r600_write_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); r600_write_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); - reloc_idx = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rtex->htile, RADEON_USAGE_READWRITE); + reloc_idx = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rtex->htile, RADEON_USAGE_READWRITE); cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); cs->buf[cs->cdw++] = reloc_idx; } else { @@ -2018,7 +2018,7 @@ static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom; unsigned db_render_control = 0; unsigned db_render_override = @@ -2026,7 +2026,7 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE); if (a->occlusion_query_enabled) { - if (rctx->chip_class >= R700) { + if (rctx->b.chip_class >= R700) { db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1); } db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); @@ -2061,14 +2061,14 @@ static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom } r600_write_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); - r600_write_value(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ - r600_write_value(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ + radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ + radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ r600_write_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); } static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_config_state *a = (struct r600_config_state*)atom; r600_write_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); @@ -2076,7 +2076,7 @@ static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom * static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask; while (dirty_mask) { @@ -2092,20 +2092,20 @@ static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom offset = vb->buffer_offset; /* fetch resources start at index 320 */ - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); - r600_write_value(cs, (320 + buffer_index) * 7); - 
r600_write_value(cs, offset); /* RESOURCEi_WORD0 */ - r600_write_value(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ - r600_write_value(cs, /* RESOURCEi_WORD2 */ + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); + radeon_emit(cs, (320 + buffer_index) * 7); + radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ + radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ + radeon_emit(cs, /* RESOURCEi_WORD2 */ S_038008_ENDIAN_SWAP(r600_endian_swap(32)) | S_038008_STRIDE(vb->stride)); - r600_write_value(cs, 0); /* RESOURCEi_WORD3 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD4 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD5 */ - r600_write_value(cs, 0xc0000000); /* RESOURCEi_WORD6 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ + radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); } } @@ -2115,7 +2115,7 @@ static void r600_emit_constant_buffers(struct r600_context *rctx, unsigned reg_alu_constbuf_size, unsigned reg_alu_const_cache) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = state->dirty_mask; while (dirty_mask) { @@ -2134,23 +2134,23 @@ static void r600_emit_constant_buffers(struct r600_context *rctx, ALIGN_DIVUP(cb->buffer_size >> 4, 16)); r600_write_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); - r600_write_value(cs, (buffer_id_base + buffer_index) * 7); - r600_write_value(cs, offset); /* RESOURCEi_WORD0 */ - r600_write_value(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ - r600_write_value(cs, /* RESOURCEi_WORD2 */ + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); + radeon_emit(cs, (buffer_id_base + buffer_index) * 7); + radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ + radeon_emit(cs, rbuffer->buf->size - offset - 1); /* RESOURCEi_WORD1 */ + radeon_emit(cs, /* RESOURCEi_WORD2 */ S_038008_ENDIAN_SWAP(r600_endian_swap(32)) | S_038008_STRIDE(16)); - r600_write_value(cs, 0); /* RESOURCEi_WORD3 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD4 */ - r600_write_value(cs, 0); /* RESOURCEi_WORD5 */ - r600_write_value(cs, 0xc0000000); /* RESOURCEi_WORD6 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ + radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ + radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, rbuffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rbuffer, RADEON_USAGE_READ)); dirty_mask &= ~(1 << buffer_index); } @@ -2182,7 +2182,7 @@ static void r600_emit_sampler_views(struct r600_context *rctx, struct r600_samplerview_state *state, unsigned resource_id_base) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; 
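The vertex-buffer and constant-buffer paths above emit the same 11-dword sequence: a SET_RESOURCE packet carrying the seven RESOURCEi words, then a NOP packet whose payload is the relocation index returned by r600_context_bo_reloc(), which now takes the common context. Folded into one illustrative helper (the PKT3/S_038008_* macros and r600_endian_swap() are the driver's own; the helper itself is hypothetical):

    /* Emit one buffer descriptor plus its relocation, as done above for
     * fetch resources (slots 320+) and for ALU constant buffers. */
    static void emit_buffer_resource(struct radeon_winsys_cs *cs,
                                     unsigned resource_index,
                                     uint64_t buf_size,   /* rbuffer->buf->size */
                                     uint32_t offset, unsigned stride,
                                     unsigned reloc)
    {
        radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0));
        radeon_emit(cs, resource_index * 7);        /* dword offset in the resource file */
        radeon_emit(cs, offset);                    /* RESOURCEi_WORD0: base address */
        radeon_emit(cs, (uint32_t)(buf_size - offset - 1)); /* WORD1: last valid byte */
        radeon_emit(cs, S_038008_ENDIAN_SWAP(r600_endian_swap(32)) |
                        S_038008_STRIDE(stride));   /* RESOURCEi_WORD2 */
        radeon_emit(cs, 0);                         /* RESOURCEi_WORD3 */
        radeon_emit(cs, 0);                         /* RESOURCEi_WORD4 */
        radeon_emit(cs, 0);                         /* RESOURCEi_WORD5 */
        radeon_emit(cs, 0xc0000000);                /* RESOURCEi_WORD6: type = valid buffer */
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));      /* relocation rides in a NOP payload */
        radeon_emit(cs, reloc);
    }

The NOP-plus-reloc pair is the same idiom used earlier in r600_emit_framebuffer_state() for the colorbuffer, FMASK and CMASK buffers.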
+ struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = state->dirty_mask; while (dirty_mask) { @@ -2193,16 +2193,16 @@ static void r600_emit_sampler_views(struct r600_context *rctx, rview = state->views[resource_index]; assert(rview); - r600_write_value(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); - r600_write_value(cs, (resource_id_base + resource_index) * 7); - r600_write_array(cs, 7, rview->tex_resource_words); + radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); + radeon_emit(cs, (resource_id_base + resource_index) * 7); + radeon_emit_array(cs, rview->tex_resource_words, 7); - reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rview->tex_resource, + reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rview->tex_resource, RADEON_USAGE_READ); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } state->dirty_mask = 0; } @@ -2234,7 +2234,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx, unsigned resource_id_base, unsigned border_color_reg) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint32_t dirty_mask = texinfo->states.dirty_mask; while (dirty_mask) { @@ -2262,9 +2262,9 @@ static void r600_emit_sampler_states(struct r600_context *rctx, } } - r600_write_value(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); - r600_write_value(cs, (resource_id_base + i) * 3); - r600_write_array(cs, 3, rstate->tex_sampler_words); + radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); + radeon_emit(cs, (resource_id_base + i) * 3); + radeon_emit_array(cs, rstate->tex_sampler_words, 3); if (rstate->border_color_use) { unsigned offset; @@ -2272,7 +2272,7 @@ static void r600_emit_sampler_states(struct r600_context *rctx, offset = border_color_reg; offset += i * 16; r600_write_config_reg_seq(cs, offset, 4); - r600_write_array(cs, 4, rstate->border_color.ui); + radeon_emit_array(cs, rstate->border_color.ui, 4); } } texinfo->states.dirty_mask = 0; @@ -2295,7 +2295,7 @@ static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_a static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; unsigned tmp; tmp = S_009508_DISABLE_CUBE_ANISO(1) | @@ -2313,19 +2313,19 @@ static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a struct r600_sample_mask *s = (struct r600_sample_mask*)a; uint8_t mask = s->sample_mask; - r600_write_context_reg(rctx->rings.gfx.cs, R_028C48_PA_SC_AA_MASK, + r600_write_context_reg(rctx->b.rings.gfx.cs, R_028C48_PA_SC_AA_MASK, mask | (mask << 8) | (mask << 16) | (mask << 24)); } static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_cso_state *state = (struct r600_cso_state*)a; struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; r600_write_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, shader->buffer, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, 
r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->buffer, RADEON_USAGE_READ)); } /* Adjust GPR allocation on R6xx/R7xx */ @@ -2381,7 +2381,7 @@ bool r600_adjust_gprs(struct r600_context *rctx) if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp) { rctx->config_state.sq_gpr_resource_mgmt_1 = tmp; rctx->config_state.atom.dirty = true; - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; } return true; } @@ -2412,7 +2412,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx) r600_init_command_buffer(cb, 256); /* R6xx requires this packet at the start of each command buffer */ - if (rctx->chip_class == R600) { + if (rctx->b.chip_class == R600) { r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0)); r600_store_value(cb, 0); } @@ -2425,7 +2425,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx) r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); - family = rctx->family; + family = rctx->b.family; ps_prio = 0; vs_prio = 1; gs_prio = 2; @@ -2594,7 +2594,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx) r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0); - if (rctx->chip_class >= R700) { + if (rctx->b.chip_class >= R700) { r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000); r600_store_config_reg(cb, R_009830_DB_DEBUG, 0); r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204); @@ -2694,7 +2694,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx) r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0); r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); - if (rctx->chip_class >= R700) { + if (rctx->b.chip_class >= R700) { r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); } @@ -2725,7 +2725,7 @@ void r600_init_atom_start_cs(struct r600_context *rctx) r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0); r600_store_context_reg(cb, R_0288DC_SQ_PGM_CF_OFFSET_FS, 0); - if (rctx->chip_class == R700 && rctx->screen->has_streamout) + if (rctx->b.chip_class == R700 && rctx->screen->has_streamout) r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf)); r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0); if (rctx->screen->has_streamout) { @@ -2834,7 +2834,7 @@ void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *sha } /* HW bug in original R600 */ - if (rctx->family == CHIP_R600) + if (rctx->b.family == CHIP_R600) ufi = 1; r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2); @@ -2923,7 +2923,7 @@ void *r600_create_resolve_blend(struct r600_context *rctx) blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO; blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO; } - return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_RESOLVE_BOX); + return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); } void *r700_create_resolve_blend(struct r600_context *rctx) @@ -2933,7 +2933,7 @@ void *r700_create_resolve_blend(struct r600_context *rctx) memset(&blend, 0, sizeof(blend)); blend.independent_blend_enable = true; blend.rt[0].colormask = 0xf; - return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_RESOLVE_BOX); + return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); } void *r600_create_decompress_blend(struct r600_context *rctx) @@ -2943,7 +2943,7 @@ void *r600_create_decompress_blend(struct r600_context *rctx) 
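Every rctx->b and rscreen->b indirection in these hunks follows from the new base structures. Pieced together from the accesses this patch makes, the layering looks roughly like this (member lists trimmed to what the diff touches, so read it as a sketch of r600_pipe_common.h rather than the full header):

    /* drivers/radeon/r600_pipe_common.h (sketch) */
    struct r600_common_screen {
        struct pipe_screen b;             /* rscreen->b.b */
        struct radeon_winsys *ws;         /* rscreen->b.ws */
        struct radeon_info info;          /* rscreen->b.info */
        enum chip_class chip_class;
        enum radeon_family family;
    };

    struct r600_common_context {
        struct pipe_context b;            /* rctx->b.b */
        struct radeon_winsys *ws;         /* rctx->b.ws */
        enum chip_class chip_class;       /* rctx->b.chip_class */
        enum radeon_family family;        /* rctx->b.family */
        unsigned flags;                   /* R600_CONTEXT_* flush flags */
        struct r600_rings rings;          /* rctx->b.rings.gfx / .dma */
        struct r600_streamout streamout;  /* shared streamout state */
        uint64_t gtt;                     /* GTT usage accounting */
    };

    /* drivers/r600/r600_pipe.h (sketch) */
    struct r600_screen {
        struct r600_common_screen b;      /* everything above via rscreen->b */
        /* ... r600g-specific fields ... */
    };

    struct r600_context {
        struct r600_common_context b;     /* everything above via rctx->b */
        /* ... r600g-specific state ... */
    };

So rctx->b.b is the pipe_context whose function table r600_init_state_functions() and r600_init_common_state_functions() fill in below, and helpers such as r600_context_bo_reloc() now take &rctx->b instead of the full r600_context.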
memset(&blend, 0, sizeof(blend)); blend.independent_blend_enable = true; blend.rt[0].colormask = 0xf; - return r600_create_blend_state_mode(&rctx->context, &blend, V_028808_SPECIAL_EXPAND_SAMPLES); + return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES); } void *r600_create_db_flush_dsa(struct r600_context *rctx) @@ -2951,8 +2951,8 @@ void *r600_create_db_flush_dsa(struct r600_context *rctx) struct pipe_depth_stencil_alpha_state dsa; boolean quirk = false; - if (rctx->family == CHIP_RV610 || rctx->family == CHIP_RV630 || - rctx->family == CHIP_RV620 || rctx->family == CHIP_RV635) + if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || + rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) quirk = true; memset(&dsa, 0, sizeof(dsa)); @@ -2967,7 +2967,7 @@ void *r600_create_db_flush_dsa(struct r600_context *rctx) dsa.stencil[0].writemask = 0xff; } - return rctx->context.create_depth_stencil_alpha_state(&rctx->context, &dsa); + return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); } void r600_update_db_shader_control(struct r600_context * rctx) @@ -3025,7 +3025,7 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx, unsigned pitch, unsigned bpp) { - struct radeon_winsys_cs *cs = rctx->rings.dma.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs; struct r600_texture *rsrc = (struct r600_texture*)src; struct r600_texture *rdst = (struct r600_texture*)dst; unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; @@ -3033,7 +3033,7 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx, uint64_t base, addr; /* make sure that the dma ring is only one active */ - rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); + rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); dst_mode = rdst->surface.level[dst_level].mode; src_mode = rsrc->surface.level[src_level].mode; @@ -3101,8 +3101,8 @@ static boolean r600_dma_copy_tile(struct r600_context *rctx, cheight = cheight > copy_height ? 
copy_height : cheight; size = (cheight * pitch) >> 2; /* emit reloc before writting cs so that cs is always in consistent state */ - r600_context_bo_reloc(rctx, &rctx->rings.dma, &rsrc->resource, RADEON_USAGE_READ); - r600_context_bo_reloc(rctx, &rctx->rings.dma, &rdst->resource, RADEON_USAGE_WRITE); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rsrc->resource, RADEON_USAGE_READ); + r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, &rdst->resource, RADEON_USAGE_WRITE); cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 1, 0, size); cs->buf[cs->cdw++] = base >> 8; cs->buf[cs->cdw++] = (detile << 31) | (array_mode << 27) | @@ -3134,7 +3134,7 @@ boolean r600_dma_blit(struct pipe_context *ctx, unsigned src_w, dst_w; unsigned src_x, src_y; - if (rctx->rings.dma.cs == NULL) { + if (rctx->b.rings.dma.cs == NULL) { return FALSE; } if (src->format != dst->format) { @@ -3251,19 +3251,19 @@ void r600_init_state_functions(struct r600_context *rctx) r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); r600_init_atom(rctx, &rctx->viewport.atom, id++, r600_emit_viewport_state, 8); r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5); - r600_init_atom(rctx, &rctx->streamout.begin_atom, id++, r600_emit_streamout_begin, 0); + rctx->atoms[id++] = &rctx->b.streamout.begin_atom; r600_init_atom(rctx, &rctx->vertex_shader.atom, id++, r600_emit_shader, 23); r600_init_atom(rctx, &rctx->pixel_shader.atom, id++, r600_emit_shader, 0); - rctx->context.create_blend_state = r600_create_blend_state; - rctx->context.create_depth_stencil_alpha_state = r600_create_dsa_state; - rctx->context.create_rasterizer_state = r600_create_rs_state; - rctx->context.create_sampler_state = r600_create_sampler_state; - rctx->context.create_sampler_view = r600_create_sampler_view; - rctx->context.set_framebuffer_state = r600_set_framebuffer_state; - rctx->context.set_polygon_stipple = r600_set_polygon_stipple; - rctx->context.set_scissor_states = r600_set_scissor_states; + rctx->b.b.create_blend_state = r600_create_blend_state; + rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state; + rctx->b.b.create_rasterizer_state = r600_create_rs_state; + rctx->b.b.create_sampler_state = r600_create_sampler_state; + rctx->b.b.create_sampler_view = r600_create_sampler_view; + rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state; + rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple; + rctx->b.b.set_scissor_states = r600_set_scissor_states; - rctx->context.get_sample_position = r600_get_sample_position; + rctx->b.b.get_sample_position = r600_get_sample_position; } /* this function must be last */ diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c index ea5a4e7627c..31d08a877e1 100644 --- a/src/gallium/drivers/r600/r600_state_common.c +++ b/src/gallium/drivers/r600/r600_state_common.c @@ -58,24 +58,23 @@ void r600_init_atom(struct r600_context *rctx, assert(id < R600_NUM_ATOMS); assert(rctx->atoms[id] == NULL); rctx->atoms[id] = atom; - atom->id = id; - atom->emit = emit; + atom->emit = (void*)emit; atom->num_dw = num_dw; atom->dirty = false; } void r600_emit_cso_state(struct r600_context *rctx, struct r600_atom *atom) { - r600_emit_command_buffer(rctx->rings.gfx.cs, ((struct r600_cso_state*)atom)->cb); + r600_emit_command_buffer(rctx->b.rings.gfx.cs, ((struct r600_cso_state*)atom)->cb); } void r600_emit_alphatest_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs 
*cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_alphatest_state *a = (struct r600_alphatest_state*)atom; unsigned alpha_ref = a->sx_alpha_ref; - if (rctx->chip_class >= EVERGREEN && a->cb0_export_16bpc) { + if (rctx->b.chip_class >= EVERGREEN && a->cb0_export_16bpc) { alpha_ref &= ~0x1FFF; } @@ -89,7 +88,7 @@ static void r600_texture_barrier(struct pipe_context *ctx) { struct r600_context *rctx = (struct r600_context *)ctx; - rctx->flags |= R600_CONTEXT_INV_TEX_CACHE | + rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE | R600_CONTEXT_FLUSH_AND_INV_CB | R600_CONTEXT_FLUSH_AND_INV | R600_CONTEXT_WAIT_3D_IDLE; @@ -142,7 +141,7 @@ static void r600_bind_blend_state_internal(struct r600_context *rctx, rctx->cb_misc_state.blend_colormask = blend->cb_target_mask; update_cb = true; } - if (rctx->chip_class <= R700 && + if (rctx->b.chip_class <= R700 && rctx->cb_misc_state.cb_color_control != color_control) { rctx->cb_misc_state.cb_color_control = color_control; update_cb = true; @@ -178,25 +177,25 @@ static void r600_set_blend_color(struct pipe_context *ctx, void r600_emit_blend_color(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_blend_color *state = &rctx->blend_color.state; r600_write_context_reg_seq(cs, R_028414_CB_BLEND_RED, 4); - r600_write_value(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */ - r600_write_value(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */ - r600_write_value(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */ - r600_write_value(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */ + radeon_emit(cs, fui(state->color[0])); /* R_028414_CB_BLEND_RED */ + radeon_emit(cs, fui(state->color[1])); /* R_028418_CB_BLEND_GREEN */ + radeon_emit(cs, fui(state->color[2])); /* R_02841C_CB_BLEND_BLUE */ + radeon_emit(cs, fui(state->color[3])); /* R_028420_CB_BLEND_ALPHA */ } void r600_emit_vgt_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_vgt_state *a = (struct r600_vgt_state *)atom; r600_write_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, a->vgt_multi_prim_ib_reset_en); r600_write_context_reg_seq(cs, R_028408_VGT_INDX_OFFSET, 2); - r600_write_value(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */ - r600_write_value(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */ + radeon_emit(cs, a->vgt_indx_offset); /* R_028408_VGT_INDX_OFFSET */ + radeon_emit(cs, a->vgt_multi_prim_ib_reset_indx); /* R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX */ } static void r600_set_clip_state(struct pipe_context *ctx, @@ -227,15 +226,15 @@ static void r600_set_stencil_ref(struct pipe_context *ctx, void r600_emit_stencil_ref(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_stencil_ref_state *a = (struct r600_stencil_ref_state*)atom; r600_write_context_reg_seq(cs, R_028430_DB_STENCILREFMASK, 2); - r600_write_value(cs, /* R_028430_DB_STENCILREFMASK */ + radeon_emit(cs, /* R_028430_DB_STENCILREFMASK */ S_028430_STENCILREF(a->state.ref_value[0]) | S_028430_STENCILMASK(a->state.valuemask[0]) | S_028430_STENCILWRITEMASK(a->state.writemask[0])); - r600_write_value(cs, /* R_028434_DB_STENCILREFMASK_BF */ + radeon_emit(cs, /* R_028434_DB_STENCILREFMASK_BF 
*/ S_028434_STENCILREF_BF(a->state.ref_value[1]) | S_028434_STENCILMASK_BF(a->state.valuemask[1]) | S_028434_STENCILWRITEMASK_BF(a->state.writemask[1])); @@ -282,7 +281,7 @@ static void r600_bind_dsa_state(struct pipe_context *ctx, void *state) ref.writemask[1] = dsa->writemask[1]; if (rctx->zwritemask != dsa->zwritemask) { rctx->zwritemask = dsa->zwritemask; - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) { /* work around some issue when not writting to zbuffer * we are having lockup on evergreen so do not enable * hyperz when not writting zbuffer @@ -299,7 +298,7 @@ static void r600_bind_dsa_state(struct pipe_context *ctx, void *state) rctx->alphatest_state.sx_alpha_test_control = dsa->sx_alpha_test_control; rctx->alphatest_state.sx_alpha_ref = dsa->alpha_ref; rctx->alphatest_state.atom.dirty = true; - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) { evergreen_update_db_shader_control(rctx); } else { r600_update_db_shader_control(rctx); @@ -336,7 +335,7 @@ static void r600_bind_rs_state(struct pipe_context *ctx, void *state) } /* Workaround for a missing scissor enable on r600. */ - if (rctx->chip_class == R600 && + if (rctx->b.chip_class == R600 && rs->scissor_enable != rctx->scissor.enable) { rctx->scissor.enable = rs->scissor_enable; rctx->scissor.atom.dirty = true; @@ -368,7 +367,7 @@ void r600_sampler_states_dirty(struct r600_context *rctx, { if (state->dirty_mask) { if (state->dirty_mask & state->has_bordercolor_mask) { - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; } state->atom.num_dw = util_bitcount(state->dirty_mask & state->has_bordercolor_mask) * 11 + @@ -427,11 +426,11 @@ static void r600_bind_sampler_states(struct pipe_context *pipe, r600_sampler_states_dirty(rctx, &dst->states); /* Seamless cubemap state. */ - if (rctx->chip_class <= R700 && + if (rctx->b.chip_class <= R700 && seamless_cube_map != -1 && seamless_cube_map != rctx->seamless_cube_map.enabled) { /* change in TA_CNTL_AUX need a pipeline flush */ - rctx->flags |= R600_CONTEXT_WAIT_3D_IDLE; + rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; rctx->seamless_cube_map.enabled = seamless_cube_map; rctx->seamless_cube_map.atom.dirty = true; } @@ -500,8 +499,8 @@ static void r600_set_index_buffer(struct pipe_context *ctx, void r600_vertex_buffers_dirty(struct r600_context *rctx) { if (rctx->vertex_buffer_state.dirty_mask) { - rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE; - rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) * + rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE; + rctx->vertex_buffer_state.atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 12 : 11) * util_bitcount(rctx->vertex_buffer_state.dirty_mask); rctx->vertex_buffer_state.atom.dirty = true; } @@ -557,8 +556,8 @@ void r600_sampler_views_dirty(struct r600_context *rctx, struct r600_samplerview_state *state) { if (state->dirty_mask) { - rctx->flags |= R600_CONTEXT_INV_TEX_CACHE; - state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) * + rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE; + state->atom.num_dw = (rctx->b.chip_class >= EVERGREEN ? 14 : 13) * util_bitcount(state->dirty_mask); state->atom.dirty = true; } @@ -617,7 +616,7 @@ static void r600_set_sampler_views(struct pipe_context *pipe, unsigned shader, } /* Changing from array to non-arrays textures and vice versa requires * updating TEX_ARRAY_OVERRIDE in sampler states on R6xx-R7xx. 
*/ - if (rctx->chip_class <= R700 && + if (rctx->b.chip_class <= R700 && (dst->states.enabled_mask & (1 << i)) && (rviews[i]->base.texture->target == PIPE_TEXTURE_1D_ARRAY || rviews[i]->base.texture->target == PIPE_TEXTURE_2D_ARRAY) != dst->is_array_sampler[i]) { @@ -674,16 +673,16 @@ static void r600_set_viewport_states(struct pipe_context *ctx, void r600_emit_viewport_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct pipe_viewport_state *state = &rctx->viewport.state; r600_write_context_reg_seq(cs, R_02843C_PA_CL_VPORT_XSCALE_0, 6); - r600_write_value(cs, fui(state->scale[0])); /* R_02843C_PA_CL_VPORT_XSCALE_0 */ - r600_write_value(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */ - r600_write_value(cs, fui(state->scale[1])); /* R_028444_PA_CL_VPORT_YSCALE_0 */ - r600_write_value(cs, fui(state->translate[1])); /* R_028448_PA_CL_VPORT_YOFFSET_0 */ - r600_write_value(cs, fui(state->scale[2])); /* R_02844C_PA_CL_VPORT_ZSCALE_0 */ - r600_write_value(cs, fui(state->translate[2])); /* R_028450_PA_CL_VPORT_ZOFFSET_0 */ + radeon_emit(cs, fui(state->scale[0])); /* R_02843C_PA_CL_VPORT_XSCALE_0 */ + radeon_emit(cs, fui(state->translate[0])); /* R_028440_PA_CL_VPORT_XOFFSET_0 */ + radeon_emit(cs, fui(state->scale[1])); /* R_028444_PA_CL_VPORT_YSCALE_0 */ + radeon_emit(cs, fui(state->translate[1])); /* R_028448_PA_CL_VPORT_YOFFSET_0 */ + radeon_emit(cs, fui(state->scale[2])); /* R_02844C_PA_CL_VPORT_ZSCALE_0 */ + radeon_emit(cs, fui(state->translate[2])); /* R_028450_PA_CL_VPORT_ZOFFSET_0 */ } /* Compute the key for the hw shader variant */ @@ -827,7 +826,7 @@ static void r600_bind_ps_state(struct pipe_context *ctx, void *state) r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->ps_shader->current->bo); - if (rctx->chip_class <= R700) { + if (rctx->b.chip_class <= R700) { bool multiwrite = rctx->ps_shader->current->shader.fs_write_all; if (rctx->cb_misc_state.multiwrite != multiwrite) { @@ -841,7 +840,7 @@ static void r600_bind_ps_state(struct pipe_context *ctx, void *state) rctx->cb_misc_state.atom.dirty = true; } - if (rctx->chip_class >= EVERGREEN) { + if (rctx->b.chip_class >= EVERGREEN) { evergreen_update_db_shader_control(rctx); } else { r600_update_db_shader_control(rctx); @@ -857,6 +856,7 @@ static void r600_bind_vs_state(struct pipe_context *ctx, void *state) rctx->vertex_shader.shader = rctx->vs_shader = (struct r600_pipe_shader_selector *)state; rctx->vertex_shader.atom.dirty = true; + rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride; r600_context_add_resource_size(ctx, (struct pipe_resource *)rctx->vs_shader->current->bo); @@ -912,8 +912,8 @@ static void r600_delete_vs_state(struct pipe_context *ctx, void *state) void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state) { if (state->dirty_mask) { - rctx->flags |= R600_CONTEXT_INV_CONST_CACHE; - state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20 + rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE; + state->atom.num_dw = rctx->b.chip_class >= EVERGREEN ? 
util_bitcount(state->dirty_mask)*20 : util_bitcount(state->dirty_mask)*19; state->atom.dirty = true; } @@ -963,7 +963,7 @@ static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint u_upload_data(rctx->uploader, 0, input->buffer_size, ptr, &cb->buffer_offset, &cb->buffer); } /* account it in gtt */ - rctx->gtt += input->buffer_size; + rctx->b.gtt += input->buffer_size; } else { /* Setup the hw buffer. */ cb->buffer_offset = input->buffer_offset; @@ -976,105 +976,6 @@ static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint r600_constant_buffers_dirty(rctx, state); } -static struct pipe_stream_output_target * -r600_create_so_target(struct pipe_context *ctx, - struct pipe_resource *buffer, - unsigned buffer_offset, - unsigned buffer_size) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - struct r600_so_target *t; - struct r600_resource *rbuffer = (struct r600_resource*)buffer; - - t = CALLOC_STRUCT(r600_so_target); - if (!t) { - return NULL; - } - - u_suballocator_alloc(rctx->allocator_so_filled_size, 4, - &t->buf_filled_size_offset, - (struct pipe_resource**)&t->buf_filled_size); - if (!t->buf_filled_size) { - FREE(t); - return NULL; - } - - t->b.reference.count = 1; - t->b.context = ctx; - pipe_resource_reference(&t->b.buffer, buffer); - t->b.buffer_offset = buffer_offset; - t->b.buffer_size = buffer_size; - - util_range_add(&rbuffer->valid_buffer_range, buffer_offset, - buffer_offset + buffer_size); - return &t->b; -} - -static void r600_so_target_destroy(struct pipe_context *ctx, - struct pipe_stream_output_target *target) -{ - struct r600_so_target *t = (struct r600_so_target*)target; - pipe_resource_reference(&t->b.buffer, NULL); - pipe_resource_reference((struct pipe_resource**)&t->buf_filled_size, NULL); - FREE(t); -} - -void r600_streamout_buffers_dirty(struct r600_context *rctx) -{ - rctx->streamout.num_dw_for_end = - 12 + /* flush_vgt_streamout */ - util_bitcount(rctx->streamout.enabled_mask) * 8 + /* STRMOUT_BUFFER_UPDATE */ - 3 /* set_streamout_enable(0) */; - - rctx->streamout.begin_atom.num_dw = - 12 + /* flush_vgt_streamout */ - 6 + /* set_streamout_enable */ - util_bitcount(rctx->streamout.enabled_mask) * 7 + /* SET_CONTEXT_REG */ - (rctx->family >= CHIP_RS780 && - rctx->family <= CHIP_RV740 ? util_bitcount(rctx->streamout.enabled_mask) * 5 : 0) + /* STRMOUT_BASE_UPDATE */ - util_bitcount(rctx->streamout.enabled_mask & rctx->streamout.append_bitmask) * 8 + /* STRMOUT_BUFFER_UPDATE */ - util_bitcount(rctx->streamout.enabled_mask & ~rctx->streamout.append_bitmask) * 6 + /* STRMOUT_BUFFER_UPDATE */ - (rctx->family > CHIP_R600 && rctx->family < CHIP_RS780 ? 2 : 0) + /* SURFACE_BASE_UPDATE */ - rctx->streamout.num_dw_for_end; - - rctx->streamout.begin_atom.dirty = true; -} - -static void r600_set_streamout_targets(struct pipe_context *ctx, - unsigned num_targets, - struct pipe_stream_output_target **targets, - unsigned append_bitmask) -{ - struct r600_context *rctx = (struct r600_context *)ctx; - unsigned i; - - /* Stop streamout. */ - if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) { - r600_emit_streamout_end(rctx); - } - - /* Set the new targets. 
*/ - for (i = 0; i < num_targets; i++) { - pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]); - r600_context_add_resource_size(ctx, targets[i]->buffer); - } - for (; i < rctx->streamout.num_targets; i++) { - pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL); - } - - rctx->streamout.enabled_mask = (num_targets >= 1 && targets[0] ? 1 : 0) | - (num_targets >= 2 && targets[1] ? 2 : 0) | - (num_targets >= 3 && targets[2] ? 4 : 0) | - (num_targets >= 4 && targets[3] ? 8 : 0); - - rctx->streamout.num_targets = num_targets; - rctx->streamout.append_bitmask = append_bitmask; - - if (num_targets) { - r600_streamout_buffers_dirty(rctx); - } -} - static void r600_set_sample_mask(struct pipe_context *pipe, unsigned sample_mask) { struct r600_context *rctx = (struct r600_context*)pipe; @@ -1138,7 +1039,7 @@ static void r600_setup_buffer_constants(struct r600_context *rctx, int shader_ty cb.user_buffer = samplers->buffer_constants; cb.buffer_offset = 0; cb.buffer_size = array_size; - rctx->context.set_constant_buffer(&rctx->context, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb); + rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb); pipe_resource_reference(&cb.buffer, NULL); } @@ -1168,7 +1069,7 @@ static void eg_setup_buffer_constants(struct r600_context *rctx, int shader_type cb.user_buffer = samplers->buffer_constants; cb.buffer_offset = 0; cb.buffer_size = array_size; - rctx->context.set_constant_buffer(&rctx->context, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb); + rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_BUFFER_INFO_CONST_BUFFER, &cb); pipe_resource_reference(&cb.buffer, NULL); } @@ -1197,7 +1098,7 @@ static void r600_setup_txq_cube_array_constants(struct r600_context *rctx, int s cb.user_buffer = samplers->txq_constants; cb.buffer_offset = 0; cb.buffer_size = array_size; - rctx->context.set_constant_buffer(&rctx->context, shader_type, R600_TXQ_CONST_BUFFER, &cb); + rctx->b.b.set_constant_buffer(&rctx->b.b, shader_type, R600_TXQ_CONST_BUFFER, &cb); pipe_resource_reference(&cb.buffer, NULL); } @@ -1228,7 +1129,7 @@ static bool r600_update_derived_state(struct r600_context *rctx) ((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) || (rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade))) { - if (rctx->chip_class >= EVERGREEN) + if (rctx->b.chip_class >= EVERGREEN) evergreen_update_ps_state(ctx, rctx->ps_shader->current); else r600_update_ps_state(ctx, rctx->ps_shader->current); @@ -1243,7 +1144,7 @@ static bool r600_update_derived_state(struct r600_context *rctx) /* on R600 we stuff masks + txq info into one constant buffer */ /* on evergreen we only need a txq info one */ - if (rctx->chip_class < EVERGREEN) { + if (rctx->b.chip_class < EVERGREEN) { if (rctx->ps_shader && rctx->ps_shader->current->shader.uses_tex_buffers) r600_setup_buffer_constants(rctx, PIPE_SHADER_FRAGMENT); if (rctx->vs_shader && rctx->vs_shader->current->shader.uses_tex_buffers) @@ -1261,7 +1162,7 @@ static bool r600_update_derived_state(struct r600_context *rctx) if (rctx->vs_shader && rctx->vs_shader->current->shader.has_txq_cube_array_z_comp) r600_setup_txq_cube_array_constants(rctx, PIPE_SHADER_VERTEX); - if (rctx->chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) { + if (rctx->b.chip_class < EVERGREEN && rctx->ps_shader && rctx->vs_shader) { if (!r600_adjust_gprs(rctx)) { /* discard rendering */ 
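The streamout code deleted here reappears, nearly unchanged, in the new drivers/radeon/r600_streamout.c, rewritten against r600_common_context so radeonsi can share it. What stays in r600g is only glue, roughly like this (the function names are taken from this patch, but the exact shape of the common init routine is an assumption):

    /* drivers/radeon/r600_streamout.c (sketch): the common code owns target
     * creation/destruction and the begin atom's emit callback. */
    void r600_streamout_init(struct r600_common_context *rctx)
    {
        rctx->b.create_stream_output_target  = r600_create_so_target;
        rctx->b.stream_output_target_destroy = r600_so_target_destroy;
        rctx->streamout.begin_atom.emit      = r600_emit_streamout_begin;
    }

    /* r600g keeps three hooks into it, all visible in this patch:
     *  - r600_init_state_functions() registers the shared atom by address:
     *        rctx->atoms[id++] = &rctx->b.streamout.begin_atom;
     *  - r600_bind_vs_state() feeds it the shader's streamout strides:
     *        rctx->b.streamout.stride_in_dw = rctx->vs_shader->so.stride;
     *  - set_stream_output_targets still points at r600_set_streamout_targets,
     *    now the shared implementation. */

This split is also why r600_init_atom() above stops storing atom->id and casts the emit callback, consistent with struct r600_atom itself moving into the common headers, where emit is typed against r600_common_context.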
return false; @@ -1306,7 +1207,7 @@ static unsigned r600_conv_prim_to_gs_out(unsigned mode) void r600_emit_clip_misc_state(struct r600_context *rctx, struct r600_atom *atom) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_clip_misc_state *state = &rctx->clip_misc_state; r600_write_context_reg(cs, R_028810_PA_CL_CLIP_CNTL, @@ -1323,7 +1224,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info struct pipe_draw_info info = *dinfo; struct pipe_index_buffer ib = {}; unsigned i; - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; if (!info.count && (info.indexed || !info.count_from_stream_output)) { assert(0); @@ -1336,8 +1237,8 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info } /* make sure that the gfx ring is only one active */ - if (rctx->rings.dma.cs) { - rctx->rings.dma.flush(rctx, RADEON_FLUSH_ASYNC); + if (rctx->b.rings.dma.cs) { + rctx->b.rings.dma.flush(rctx, RADEON_FLUSH_ASYNC); } if (!r600_update_derived_state(rctx)) { @@ -1364,7 +1265,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info &out_offset, &out_buffer, &ptr); util_shorten_ubyte_elts_to_userptr( - &rctx->context, &ib, 0, ib.offset, info.count, ptr); + &rctx->b.b, &ib, 0, ib.offset, info.count, ptr); pipe_resource_reference(&ib.buffer, NULL); ib.user_buffer = NULL; @@ -1398,8 +1299,8 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info } /* Workaround for hardware deadlock on certain R600 ASICs: write into a CB register. */ - if (rctx->chip_class == R600) { - rctx->flags |= R600_CONTEXT_PS_PARTIAL_FLUSH; + if (rctx->b.chip_class == R600) { + rctx->b.flags |= R600_CONTEXT_PS_PARTIAL_FLUSH; rctx->cb_misc_state.atom.dirty = true; } @@ -1466,12 +1367,12 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info cs->buf[cs->cdw++] = info.count; cs->buf[cs->cdw++] = V_0287F0_DI_SRC_SEL_DMA; cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, rctx->predicate_drawing); - cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, &rctx->rings.gfx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ); + cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)ib.buffer, RADEON_USAGE_READ); } } else { if (info.count_from_stream_output) { struct r600_so_target *t = (struct r600_so_target*)info.count_from_stream_output; - uint64_t va = r600_resource_va(&rctx->screen->screen, (void*)t->buf_filled_size) + t->buf_filled_size_offset; + uint64_t va = r600_resource_va(&rctx->screen->b.b, (void*)t->buf_filled_size) + t->buf_filled_size_offset; r600_write_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE, t->stride_in_dw); @@ -1483,7 +1384,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info cs->buf[cs->cdw++] = 0; /* unused */ cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0); - cs->buf[cs->cdw++] = r600_context_bo_reloc(rctx, &rctx->rings.gfx, t->buf_filled_size, RADEON_USAGE_READ); + cs->buf[cs->cdw++] = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, t->buf_filled_size, RADEON_USAGE_READ); } cs->buf[cs->cdw++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1, rctx->predicate_drawing); @@ -1550,7 +1451,7 @@ void r600_draw_rectangle(struct blitter_context *blitter, viewport.translate[1] = 0.0f; viewport.translate[2] = 0.0f; viewport.translate[3] = 0.0f; - rctx->context.set_viewport_states(&rctx->context, 0, 1, &viewport); + 
rctx->b.b.set_viewport_states(&rctx->b.b, 0, 1, &viewport); /* Upload vertices. The hw rectangle has only 3 vertices, * I guess the 4th one is derived from the first 3. @@ -1578,7 +1479,7 @@ void r600_draw_rectangle(struct blitter_context *blitter, } /* draw */ - util_draw_vertex_buffer(&rctx->context, NULL, buf, rctx->blitter->vb_slot, offset, + util_draw_vertex_buffer(&rctx->b.b, NULL, buf, rctx->blitter->vb_slot, offset, R600_PRIM_RECTANGLE_LIST, 3, 2); pipe_resource_reference(&buf, NULL); } @@ -1718,68 +1619,66 @@ bool sampler_state_needs_border_color(const struct pipe_sampler_state *state) void r600_emit_shader(struct r600_context *rctx, struct r600_atom *a) { - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; struct r600_pipe_shader *shader = ((struct r600_shader_state*)a)->shader->current; r600_emit_command_buffer(cs, &shader->command_buffer); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx, shader->bo, RADEON_USAGE_READ)); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, shader->bo, RADEON_USAGE_READ)); } /* keep this at the end of this file, please */ void r600_init_common_state_functions(struct r600_context *rctx) { - rctx->context.create_fs_state = r600_create_ps_state; - rctx->context.create_vs_state = r600_create_vs_state; - rctx->context.create_vertex_elements_state = r600_create_vertex_fetch_shader; - rctx->context.bind_blend_state = r600_bind_blend_state; - rctx->context.bind_depth_stencil_alpha_state = r600_bind_dsa_state; - rctx->context.bind_fragment_sampler_states = r600_bind_ps_sampler_states; - rctx->context.bind_fs_state = r600_bind_ps_state; - rctx->context.bind_rasterizer_state = r600_bind_rs_state; - rctx->context.bind_vertex_elements_state = r600_bind_vertex_elements; - rctx->context.bind_vertex_sampler_states = r600_bind_vs_sampler_states; - rctx->context.bind_vs_state = r600_bind_vs_state; - rctx->context.delete_blend_state = r600_delete_blend_state; - rctx->context.delete_depth_stencil_alpha_state = r600_delete_dsa_state; - rctx->context.delete_fs_state = r600_delete_ps_state; - rctx->context.delete_rasterizer_state = r600_delete_rs_state; - rctx->context.delete_sampler_state = r600_delete_sampler_state; - rctx->context.delete_vertex_elements_state = r600_delete_vertex_elements; - rctx->context.delete_vs_state = r600_delete_vs_state; - rctx->context.set_blend_color = r600_set_blend_color; - rctx->context.set_clip_state = r600_set_clip_state; - rctx->context.set_constant_buffer = r600_set_constant_buffer; - rctx->context.set_sample_mask = r600_set_sample_mask; - rctx->context.set_stencil_ref = r600_set_pipe_stencil_ref; - rctx->context.set_viewport_states = r600_set_viewport_states; - rctx->context.set_vertex_buffers = r600_set_vertex_buffers; - rctx->context.set_index_buffer = r600_set_index_buffer; - rctx->context.set_fragment_sampler_views = r600_set_ps_sampler_views; - rctx->context.set_vertex_sampler_views = r600_set_vs_sampler_views; - rctx->context.sampler_view_destroy = r600_sampler_view_destroy; - rctx->context.texture_barrier = r600_texture_barrier; - rctx->context.create_stream_output_target = r600_create_so_target; - rctx->context.stream_output_target_destroy = r600_so_target_destroy; - rctx->context.set_stream_output_targets = r600_set_streamout_targets; - rctx->context.draw_vbo = r600_draw_vbo; + rctx->b.b.create_fs_state = r600_create_ps_state; + 
rctx->b.b.create_vs_state = r600_create_vs_state; + rctx->b.b.create_vertex_elements_state = r600_create_vertex_fetch_shader; + rctx->b.b.bind_blend_state = r600_bind_blend_state; + rctx->b.b.bind_depth_stencil_alpha_state = r600_bind_dsa_state; + rctx->b.b.bind_fragment_sampler_states = r600_bind_ps_sampler_states; + rctx->b.b.bind_fs_state = r600_bind_ps_state; + rctx->b.b.bind_rasterizer_state = r600_bind_rs_state; + rctx->b.b.bind_vertex_elements_state = r600_bind_vertex_elements; + rctx->b.b.bind_vertex_sampler_states = r600_bind_vs_sampler_states; + rctx->b.b.bind_vs_state = r600_bind_vs_state; + rctx->b.b.delete_blend_state = r600_delete_blend_state; + rctx->b.b.delete_depth_stencil_alpha_state = r600_delete_dsa_state; + rctx->b.b.delete_fs_state = r600_delete_ps_state; + rctx->b.b.delete_rasterizer_state = r600_delete_rs_state; + rctx->b.b.delete_sampler_state = r600_delete_sampler_state; + rctx->b.b.delete_vertex_elements_state = r600_delete_vertex_elements; + rctx->b.b.delete_vs_state = r600_delete_vs_state; + rctx->b.b.set_blend_color = r600_set_blend_color; + rctx->b.b.set_clip_state = r600_set_clip_state; + rctx->b.b.set_constant_buffer = r600_set_constant_buffer; + rctx->b.b.set_sample_mask = r600_set_sample_mask; + rctx->b.b.set_stencil_ref = r600_set_pipe_stencil_ref; + rctx->b.b.set_viewport_states = r600_set_viewport_states; + rctx->b.b.set_vertex_buffers = r600_set_vertex_buffers; + rctx->b.b.set_index_buffer = r600_set_index_buffer; + rctx->b.b.set_fragment_sampler_views = r600_set_ps_sampler_views; + rctx->b.b.set_vertex_sampler_views = r600_set_vs_sampler_views; + rctx->b.b.sampler_view_destroy = r600_sampler_view_destroy; + rctx->b.b.texture_barrier = r600_texture_barrier; + rctx->b.b.set_stream_output_targets = r600_set_streamout_targets; + rctx->b.b.draw_vbo = r600_draw_vbo; } void r600_trace_emit(struct r600_context *rctx) { struct r600_screen *rscreen = rctx->screen; - struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs; uint64_t va; uint32_t reloc; - va = r600_resource_va(&rscreen->screen, (void*)rscreen->trace_bo); - reloc = r600_context_bo_reloc(rctx, &rctx->rings.gfx, rscreen->trace_bo, RADEON_USAGE_READWRITE); - r600_write_value(cs, PKT3(PKT3_MEM_WRITE, 3, 0)); - r600_write_value(cs, va & 0xFFFFFFFFUL); - r600_write_value(cs, (va >> 32UL) & 0xFFUL); - r600_write_value(cs, cs->cdw); - r600_write_value(cs, rscreen->cs_count); - r600_write_value(cs, PKT3(PKT3_NOP, 0, 0)); - r600_write_value(cs, reloc); + va = r600_resource_va(&rscreen->b.b, (void*)rscreen->trace_bo); + reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, rscreen->trace_bo, RADEON_USAGE_READWRITE); + radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0)); + radeon_emit(cs, va & 0xFFFFFFFFUL); + radeon_emit(cs, (va >> 32UL) & 0xFFUL); + radeon_emit(cs, cs->cdw); + radeon_emit(cs, rscreen->cs_count); + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); } diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c index 742e98227e6..1d7948311da 100644 --- a/src/gallium/drivers/r600/r600_texture.c +++ b/src/gallium/drivers/r600/r600_texture.c @@ -149,7 +149,7 @@ static int r600_init_surface(struct r600_screen *rscreen, surface->array_size = 1; surface->last_level = ptex->last_level; - if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth && + if (rscreen->b.chip_class >= EVERGREEN && !is_flushed_depth && ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) { surface->bpe = 4; /* stencil is allocated 
separately on evergreen */ } else { @@ -229,7 +229,7 @@ static int r600_setup_surface(struct pipe_screen *screen, unsigned i; int r; - r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface); + r = rscreen->b.ws->surface_init(rscreen->b.ws, &rtex->surface); if (r) { return r; } @@ -275,7 +275,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen, struct radeon_surface *surface = &rtex->surface; struct r600_screen *rscreen = (struct r600_screen*)screen; - rscreen->ws->buffer_set_tiling(resource->buf, + rscreen->b.ws->buffer_set_tiling(resource->buf, NULL, surface->level[0].mode >= RADEON_SURF_MODE_1D ? RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR, @@ -287,7 +287,7 @@ static boolean r600_texture_get_handle(struct pipe_screen* screen, surface->mtilea, rtex->surface.level[0].pitch_bytes); - return rscreen->ws->buffer_get_handle(resource->buf, + return rscreen->b.ws->buffer_get_handle(resource->buf, rtex->surface.level[0].pitch_bytes, whandle); } @@ -340,11 +340,11 @@ void r600_texture_get_fmask_info(struct r600_screen *rscreen, /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption. * This can be fixed by writing a separate FMASK allocator specifically * for R600-R700 asics. */ - if (rscreen->chip_class <= R700) { + if (rscreen->b.chip_class <= R700) { fmask.bpe *= 2; } - if (rscreen->ws->surface_init(rscreen->ws, &fmask)) { + if (rscreen->b.ws->surface_init(rscreen->b.ws, &fmask)) { R600_ERR("Got error in surface_init while allocating FMASK.\n"); return; } @@ -485,7 +485,7 @@ r600_texture_create_object(struct pipe_screen *screen, rtex->htile = NULL; if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER | R600_RESOURCE_FLAG_FLUSHED_DEPTH)) && util_format_is_depth_or_stencil(base->format) && - rscreen->info.drm_minor >= 26 && + rscreen->b.info.drm_minor >= 26 && !(rscreen->debug_flags & DBG_NO_HYPERZ) && base->target == PIPE_TEXTURE_2D && rtex->surface.level[0].nblk_x >= 32 && @@ -493,7 +493,7 @@ r600_texture_create_object(struct pipe_screen *screen, unsigned sw = rtex->surface.level[0].nblk_x * rtex->surface.blk_w; unsigned sh = rtex->surface.level[0].nblk_y * rtex->surface.blk_h; unsigned htile_size; - unsigned npipes = rscreen->info.r600_num_tile_pipes; + unsigned npipes = rscreen->b.info.r600_num_tile_pipes; /* this alignment and htile size only apply to linear htile buffer */ sw = align(sw, 16 << 3); @@ -502,7 +502,7 @@ r600_texture_create_object(struct pipe_screen *screen, /* must be aligned with 2K * npipes */ htile_size = align(htile_size, (2 << 10) * npipes); - rtex->htile = (struct r600_resource*)pipe_buffer_create(&rscreen->screen, PIPE_BIND_CUSTOM, + rtex->htile = (struct r600_resource*)pipe_buffer_create(&rscreen->b.b, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, htile_size); if (rtex->htile == NULL) { /* this is not a fatal error as we can still keep rendering @@ -526,7 +526,7 @@ r600_texture_create_object(struct pipe_screen *screen, } else { /* This is usually the window framebuffer. We want it in VRAM, always. 
*/ resource->buf = buf; - resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf); + resource->cs_buf = rscreen->b.ws->buffer_get_cs_handle(buf); resource->domains = RADEON_DOMAIN_VRAM; } @@ -628,7 +628,7 @@ struct pipe_resource *r600_texture_create(struct pipe_screen *screen, if (r) { return NULL; } - r = rscreen->ws->surface_best(rscreen->ws, &surface); + r = rscreen->b.ws->surface_best(rscreen->b.ws, &surface); if (r) { return NULL; } @@ -696,11 +696,11 @@ struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen, templ->depth0 != 1 || templ->last_level != 0) return NULL; - buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride); + buf = rscreen->b.ws->buffer_from_handle(rscreen->b.ws, whandle, &stride); if (!buf) return NULL; - rscreen->ws->buffer_get_tiling(buf, &micro, &macro, + rscreen->b.ws->buffer_get_tiling(buf, &micro, &macro, &surface.bankw, &surface.bankh, &surface.tile_split, &surface.stencil_tile_split, @@ -830,7 +830,7 @@ static void *r600_texture_transfer_map(struct pipe_context *ctx, /* Use a staging texture for uploads if the underlying BO is busy. */ if (!(usage & PIPE_TRANSFER_READ) && (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) || - rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) { + rctx->b.ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) { use_staging_texture = TRUE; } @@ -964,7 +964,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx, } else { buf = ((struct r600_resource *)transfer->resource)->cs_buf; } - rctx->ws->buffer_unmap(buf); + rctx->b.ws->buffer_unmap(buf); if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) { if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) { @@ -985,8 +985,8 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx, void r600_init_surface_functions(struct r600_context *r600) { - r600->context.create_surface = r600_create_surface; - r600->context.surface_destroy = r600_surface_destroy; + r600->b.b.create_surface = r600_create_surface; + r600->b.b.surface_destroy = r600_surface_destroy; } unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format, @@ -1051,7 +1051,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen, uint32_t result = 0, word4 = 0, yuv_format = 0; const struct util_format_description *desc; boolean uniform = TRUE; - bool enable_s3tc = rscreen->info.drm_minor >= 9; + bool enable_s3tc = rscreen->b.info.drm_minor >= 9; bool is_srgb_valid = FALSE; const unsigned char swizzle_xxxx[4] = {0, 0, 0, 0}; const unsigned char swizzle_yyyy[4] = {1, 1, 1, 1}; @@ -1087,7 +1087,7 @@ uint32_t r600_translate_texformat(struct pipe_screen *screen, goto out_word4; case PIPE_FORMAT_X8Z24_UNORM: case PIPE_FORMAT_S8_UINT_Z24_UNORM: - if (rscreen->chip_class < EVERGREEN) + if (rscreen->b.chip_class < EVERGREEN) goto out_unknown; word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE); result = FMT_24_8; goto out_word4; case PIPE_FORMAT_X24S8_UINT: word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); word4 |= r600_get_swizzle_combined(swizzle_yyyy, swizzle_view, FALSE); result = FMT_8_24; goto out_word4; case PIPE_FORMAT_S8X24_UINT: - if (rscreen->chip_class < EVERGREEN) + if (rscreen->b.chip_class < EVERGREEN) goto out_unknown; word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); word4 |= r600_get_swizzle_combined(swizzle_xxxx, swizzle_view, FALSE); diff --git a/src/gallium/drivers/r600/r600_uvd.c b/src/gallium/drivers/r600/r600_uvd.c index bfdc4f1e5f5..eeac76f3e23 100644 --- a/src/gallium/drivers/r600/r600_uvd.c +++
b/src/gallium/drivers/r600/r600_uvd.c @@ -76,7 +76,7 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe, template.height = align(tmpl->height / array_size, VL_MACROBLOCK_HEIGHT); vl_video_buffer_template(&templ, &template, resource_formats[0], 1, array_size, PIPE_USAGE_STATIC, 0); - if (ctx->chip_class < EVERGREEN || tmpl->interlaced) + if (ctx->b.chip_class < EVERGREEN || tmpl->interlaced) templ.flags = R600_RESOURCE_FLAG_TRANSFER; resources[0] = (struct r600_texture *) pipe->screen->resource_create(pipe->screen, &templ); @@ -85,7 +85,7 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe, if (resource_formats[1] != PIPE_FORMAT_NONE) { vl_video_buffer_template(&templ, &template, resource_formats[1], 1, array_size, PIPE_USAGE_STATIC, 1); - if (ctx->chip_class < EVERGREEN || tmpl->interlaced) + if (ctx->b.chip_class < EVERGREEN || tmpl->interlaced) templ.flags = R600_RESOURCE_FLAG_TRANSFER; resources[1] = (struct r600_texture *) pipe->screen->resource_create(pipe->screen, &templ); @@ -95,7 +95,7 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe, if (resource_formats[2] != PIPE_FORMAT_NONE) { vl_video_buffer_template(&templ, &template, resource_formats[2], 1, array_size, PIPE_USAGE_STATIC, 2); - if (ctx->chip_class < EVERGREEN || tmpl->interlaced) + if (ctx->b.chip_class < EVERGREEN || tmpl->interlaced) templ.flags = R600_RESOURCE_FLAG_TRANSFER; resources[2] = (struct r600_texture *) pipe->screen->resource_create(pipe->screen, &templ); @@ -111,14 +111,14 @@ struct pipe_video_buffer *r600_video_buffer_create(struct pipe_context *pipe, surfaces[i] = &resources[i]->surface; } - ruvd_join_surfaces(ctx->ws, templ.bind, pbs, surfaces); + ruvd_join_surfaces(ctx->b.ws, templ.bind, pbs, surfaces); for (i = 0; i < VL_NUM_COMPONENTS; ++i) { if (!resources[i]) continue; /* recreate the CS handle */ - resources[i]->resource.cs_buf = ctx->ws->buffer_get_cs_handle( + resources[i]->resource.cs_buf = ctx->b.ws->buffer_get_cs_handle( resources[i]->resource.buf); } @@ -169,7 +169,7 @@ struct pipe_video_codec *r600_uvd_create_decoder(struct pipe_context *context, { struct r600_context *ctx = (struct r600_context *)context; - return ruvd_create_decoder(context, templat, ctx->ws, r600_uvd_set_dtb); + return ruvd_create_decoder(context, templat, ctx->b.ws, r600_uvd_set_dtb); } int r600_uvd_get_video_param(struct pipe_screen *screen, @@ -180,7 +180,7 @@ int r600_uvd_get_video_param(struct pipe_screen *screen, struct r600_screen *rscreen = (struct r600_screen *)screen; /* UVD 2.x limits */ - if (rscreen->family < CHIP_PALM) { + if (rscreen->b.family < CHIP_PALM) { enum pipe_video_format codec = u_reduce_video_profile(profile); switch (param) { case PIPE_VIDEO_CAP_SUPPORTED: diff --git a/src/gallium/drivers/r600/sb/sb_core.cpp b/src/gallium/drivers/r600/sb/sb_core.cpp index d907508eb2a..f16a0ffa3cd 100644 --- a/src/gallium/drivers/r600/sb/sb_core.cpp +++ b/src/gallium/drivers/r600/sb/sb_core.cpp @@ -51,8 +51,8 @@ sb_context *r600_sb_context_create(struct r600_context *rctx) { sb_context *sctx = new sb_context(); - if (sctx->init(rctx->isa, translate_chip(rctx->family), - translate_chip_class(rctx->chip_class))) { + if (sctx->init(rctx->isa, translate_chip(rctx->b.family), + translate_chip_class(rctx->b.chip_class))) { delete sctx; sctx = NULL; } diff --git a/src/gallium/drivers/radeon/Makefile.sources b/src/gallium/drivers/radeon/Makefile.sources index d33c81b0fc0..740dbc79011 100644 --- a/src/gallium/drivers/radeon/Makefile.sources 
+++ b/src/gallium/drivers/radeon/Makefile.sources @@ -1,4 +1,6 @@ C_SOURCES := \ + r600_pipe_common.c \ + r600_streamout.c \ radeon_uvd.c LLVM_C_FILES := \ diff --git a/src/gallium/drivers/radeon/r600_cs.h b/src/gallium/drivers/radeon/r600_cs.h new file mode 100644 index 00000000000..c8bb2976f84 --- /dev/null +++ b/src/gallium/drivers/radeon/r600_cs.h @@ -0,0 +1,97 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Marek Olšák + */ + +/** + * This file contains helpers for writing commands to command streams. + */ + +#ifndef R600_CS_H +#define R600_CS_H + +#include "../../winsys/radeon/drm/radeon_winsys.h" +#include "r600d_common.h" + +static INLINE uint64_t r600_resource_va(struct pipe_screen *screen, + struct pipe_resource *resource) +{ + struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; + struct r600_resource *rresource = (struct r600_resource*)resource; + + return rscreen->ws->buffer_get_virtual_address(rresource->cs_buf); +} + +static INLINE unsigned r600_context_bo_reloc(struct r600_common_context *rctx, + struct r600_ring *ring, + struct r600_resource *rbo, + enum radeon_bo_usage usage) +{ + assert(usage); + + /* Make sure that all previous rings are flushed so that everything + * looks serialized from the driver point of view.
+ */ + if (!ring->flushing) { + if (ring == &rctx->rings.gfx) { + if (rctx->rings.dma.cs) { + /* flush dma ring */ + rctx->rings.dma.flush(rctx, RADEON_FLUSH_ASYNC); + } + } else { + /* flush gfx ring */ + rctx->rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC); + } + } + return rctx->ws->cs_add_reloc(ring->cs, rbo->cs_buf, usage, rbo->domains) * 4; +} + +static INLINE void r600_write_config_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) +{ + assert(reg < R600_CONTEXT_REG_OFFSET); + assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS); + radeon_emit(cs, PKT3(PKT3_SET_CONFIG_REG, num, 0)); + radeon_emit(cs, (reg - R600_CONFIG_REG_OFFSET) >> 2); +} + +static INLINE void r600_write_config_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) +{ + r600_write_config_reg_seq(cs, reg, 1); + radeon_emit(cs, value); +} + +static INLINE void r600_write_context_reg_seq(struct radeon_winsys_cs *cs, unsigned reg, unsigned num) +{ + assert(reg >= R600_CONTEXT_REG_OFFSET); + assert(cs->cdw+2+num <= RADEON_MAX_CMDBUF_DWORDS); + radeon_emit(cs, PKT3(PKT3_SET_CONTEXT_REG, num, 0)); + radeon_emit(cs, (reg - R600_CONTEXT_REG_OFFSET) >> 2); +} + +static INLINE void r600_write_context_reg(struct radeon_winsys_cs *cs, unsigned reg, unsigned value) +{ + r600_write_context_reg_seq(cs, reg, 1); + radeon_emit(cs, value); +} + +#endif diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c new file mode 100644 index 00000000000..cdfdc19bd0e --- /dev/null +++ b/src/gallium/drivers/radeon/r600_pipe_common.c @@ -0,0 +1,85 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Authors: Marek Olšák + * + */ + +#include "r600_pipe_common.h" + +void r600_common_screen_init(struct r600_common_screen *rscreen, + struct radeon_winsys *ws) +{ + ws->query_info(ws, &rscreen->info); + + rscreen->ws = ws; + rscreen->family = rscreen->info.family; + rscreen->chip_class = rscreen->info.chip_class; +} + +bool r600_common_context_init(struct r600_common_context *rctx, + struct r600_common_screen *rscreen) +{ + rctx->ws = rscreen->ws; + rctx->family = rscreen->family; + rctx->chip_class = rscreen->chip_class; + + r600_streamout_init(rctx); + + rctx->allocator_so_filled_size = u_suballocator_create(&rctx->b, 4096, 4, + 0, PIPE_USAGE_STATIC, TRUE); + if (!rctx->allocator_so_filled_size) + return false; + + return true; +} + +void r600_common_context_cleanup(struct r600_common_context *rctx) +{ + if (rctx->allocator_so_filled_size) { + u_suballocator_destroy(rctx->allocator_so_filled_size); + } +} + +void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_resource *rr = (struct r600_resource *)r; + + if (r == NULL) { + return; + } + + /* + * The idea is to compute a gross estimate of memory requirement of + * each draw call. After each draw call, memory will be precisely + * accounted. So the uncertainty is only on the current draw call. + * In practice this gave a very good estimate (+/- 10% of the target + * memory limit). + */ + if (rr->domains & RADEON_DOMAIN_GTT) { + rctx->gtt += rr->buf->size; + } + if (rr->domains & RADEON_DOMAIN_VRAM) { + rctx->vram += rr->buf->size; + } +} diff --git a/src/gallium/drivers/radeon/r600_pipe_common.h b/src/gallium/drivers/radeon/r600_pipe_common.h new file mode 100644 index 00000000000..0bfdb47b62b --- /dev/null +++ b/src/gallium/drivers/radeon/r600_pipe_common.h @@ -0,0 +1,179 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: Marek Olšák + * + */ + +/** + * This file contains common screen and context structures and functions + * for r600g and radeonsi.
+ */ + +#ifndef R600_PIPE_COMMON_H +#define R600_PIPE_COMMON_H + +#include "../../winsys/radeon/drm/radeon_winsys.h" + +#include "util/u_range.h" +#include "util/u_suballoc.h" +#include "util/u_transfer.h" + +/* read caches */ +#define R600_CONTEXT_INV_VERTEX_CACHE (1 << 0) +#define R600_CONTEXT_INV_TEX_CACHE (1 << 1) +#define R600_CONTEXT_INV_CONST_CACHE (1 << 2) +/* read-write caches */ +#define R600_CONTEXT_STREAMOUT_FLUSH (1 << 8) +#define R600_CONTEXT_FLUSH_AND_INV (1 << 9) +#define R600_CONTEXT_FLUSH_AND_INV_CB_META (1 << 10) +#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 11) +#define R600_CONTEXT_FLUSH_AND_INV_DB (1 << 12) +#define R600_CONTEXT_FLUSH_AND_INV_CB (1 << 13) +/* engine synchronization */ +#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 16) +#define R600_CONTEXT_WAIT_3D_IDLE (1 << 17) +#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 18) + +struct r600_common_context; + +struct r600_resource { + struct u_resource b; + + /* Winsys objects. */ + struct pb_buffer *buf; + struct radeon_winsys_cs_handle *cs_buf; + + /* Resource state. */ + enum radeon_bo_domain domains; + + /* The buffer range which is initialized (with a write transfer, + * streamout, DMA, or as a random access target). The rest of + * the buffer is considered invalid and can be mapped unsynchronized. + * + * This allows unsynchronized mapping of a buffer range which hasn't + * been used yet. It's for applications which forget to use + * the unsynchronized map flag and expect the driver to figure it out. + */ + struct util_range valid_buffer_range; +}; + +struct r600_common_screen { + struct pipe_screen b; + struct radeon_winsys *ws; + enum radeon_family family; + enum chip_class chip_class; + struct radeon_info info; +}; + +/* This encapsulates a state or an operation which can be emitted into the GPU + * command stream. */ +struct r600_atom { + void (*emit)(struct r600_common_context *ctx, struct r600_atom *state); + unsigned num_dw; + bool dirty; +}; + +struct r600_so_target { + struct pipe_stream_output_target b; + + /* The buffer where BUFFER_FILLED_SIZE is stored. */ + struct r600_resource *buf_filled_size; + unsigned buf_filled_size_offset; + + unsigned stride_in_dw; +}; + +struct r600_streamout { + struct r600_atom begin_atom; + bool begin_emitted; + unsigned num_dw_for_end; + + unsigned enabled_mask; + unsigned num_targets; + struct r600_so_target *targets[PIPE_MAX_SO_BUFFERS]; + + unsigned append_bitmask; + bool suspended; + + /* External state which comes from the vertex shader, + * it must be set explicitly when binding a shader. */ + unsigned *stride_in_dw; +}; + +struct r600_ring { + struct radeon_winsys_cs *cs; + bool flushing; + void (*flush)(void *ctx, unsigned flags); +}; + +struct r600_rings { + struct r600_ring gfx; + struct r600_ring dma; +}; + +struct r600_common_context { + struct pipe_context b; /* base class */ + + struct radeon_winsys *ws; + enum radeon_family family; + enum chip_class chip_class; + struct r600_rings rings; + + struct u_suballocator *allocator_so_filled_size; + + /* Current unaccounted memory usage. */ + uint64_t vram; + uint64_t gtt; + + /* States. */ + struct r600_streamout streamout; + + /* Additional context states.
*/ + unsigned flags; /* flush flags */ +}; + +/* r600_pipe_common.c */ +void r600_common_screen_init(struct r600_common_screen *rscreen, + struct radeon_winsys *ws); +bool r600_common_context_init(struct r600_common_context *rctx, + struct r600_common_screen *rscreen); +void r600_common_context_cleanup(struct r600_common_context *rctx); +void r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r); + +/* r600_streamout.c */ +void r600_streamout_buffers_dirty(struct r600_common_context *rctx); +void r600_set_streamout_targets(struct pipe_context *ctx, + unsigned num_targets, + struct pipe_stream_output_target **targets, + unsigned append_bitmask); +void r600_emit_streamout_end(struct r600_common_context *rctx); +void r600_streamout_init(struct r600_common_context *rctx); + +/* Inline helpers. */ + +static INLINE struct r600_resource *r600_resource(struct pipe_resource *r) +{ + return (struct r600_resource*)r; +} + +#endif diff --git a/src/gallium/drivers/radeon/r600_streamout.c b/src/gallium/drivers/radeon/r600_streamout.c new file mode 100644 index 00000000000..ab40630920b --- /dev/null +++ b/src/gallium/drivers/radeon/r600_streamout.c @@ -0,0 +1,338 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * Authors: Marek Olšák + * + */ + +#include "r600_pipe_common.h" +#include "r600_cs.h" + +#include "util/u_memory.h" + +static struct pipe_stream_output_target * +r600_create_so_target(struct pipe_context *ctx, + struct pipe_resource *buffer, + unsigned buffer_offset, + unsigned buffer_size) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + struct r600_so_target *t; + struct r600_resource *rbuffer = (struct r600_resource*)buffer; + + t = CALLOC_STRUCT(r600_so_target); + if (!t) { + return NULL; + } + + u_suballocator_alloc(rctx->allocator_so_filled_size, 4, + &t->buf_filled_size_offset, + (struct pipe_resource**)&t->buf_filled_size); + if (!t->buf_filled_size) { + FREE(t); + return NULL; + } + + t->b.reference.count = 1; + t->b.context = ctx; + pipe_resource_reference(&t->b.buffer, buffer); + t->b.buffer_offset = buffer_offset; + t->b.buffer_size = buffer_size; + + util_range_add(&rbuffer->valid_buffer_range, buffer_offset, + buffer_offset + buffer_size); + return &t->b; +} + +static void r600_so_target_destroy(struct pipe_context *ctx, + struct pipe_stream_output_target *target) +{ + struct r600_so_target *t = (struct r600_so_target*)target; + pipe_resource_reference(&t->b.buffer, NULL); + pipe_resource_reference((struct pipe_resource**)&t->buf_filled_size, NULL); + FREE(t); +} + +void r600_streamout_buffers_dirty(struct r600_common_context *rctx) +{ + rctx->streamout.num_dw_for_end = + 12 + /* flush_vgt_streamout */ + util_bitcount(rctx->streamout.enabled_mask) * 8 + /* STRMOUT_BUFFER_UPDATE */ + 3 /* set_streamout_enable(0) */; + + rctx->streamout.begin_atom.num_dw = + 12 + /* flush_vgt_streamout */ + 6 + /* set_streamout_enable */ + util_bitcount(rctx->streamout.enabled_mask) * 7 + /* SET_CONTEXT_REG */ + (rctx->family >= CHIP_RS780 && + rctx->family <= CHIP_RV740 ? util_bitcount(rctx->streamout.enabled_mask) * 5 : 0) + /* STRMOUT_BASE_UPDATE */ + util_bitcount(rctx->streamout.enabled_mask & rctx->streamout.append_bitmask) * 8 + /* STRMOUT_BUFFER_UPDATE */ + util_bitcount(rctx->streamout.enabled_mask & ~rctx->streamout.append_bitmask) * 6 + /* STRMOUT_BUFFER_UPDATE */ + (rctx->family > CHIP_R600 && rctx->family < CHIP_RS780 ? 2 : 0) + /* SURFACE_BASE_UPDATE */ + rctx->streamout.num_dw_for_end; + + rctx->streamout.begin_atom.dirty = true; +} + +void r600_set_streamout_targets(struct pipe_context *ctx, + unsigned num_targets, + struct pipe_stream_output_target **targets, + unsigned append_bitmask) +{ + struct r600_common_context *rctx = (struct r600_common_context *)ctx; + unsigned i; + + /* Stop streamout. */ + if (rctx->streamout.num_targets && rctx->streamout.begin_emitted) { + r600_emit_streamout_end(rctx); + } + + /* Set the new targets. */ + for (i = 0; i < num_targets; i++) { + pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], targets[i]); + r600_context_add_resource_size(ctx, targets[i]->buffer); + } + for (; i < rctx->streamout.num_targets; i++) { + pipe_so_target_reference((struct pipe_stream_output_target**)&rctx->streamout.targets[i], NULL); + } + + rctx->streamout.enabled_mask = (num_targets >= 1 && targets[0] ? 1 : 0) | + (num_targets >= 2 && targets[1] ? 2 : 0) | + (num_targets >= 3 && targets[2] ? 4 : 0) | + (num_targets >= 4 && targets[3] ? 
8 : 0); + + rctx->streamout.num_targets = num_targets; + rctx->streamout.append_bitmask = append_bitmask; + + if (num_targets) { + r600_streamout_buffers_dirty(rctx); + } +} + +static void r600_flush_vgt_streamout(struct r600_common_context *rctx) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + + r600_write_config_reg(cs, R_008490_CP_STRMOUT_CNTL, 0); + + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0)); + + radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0)); + radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */ + radeon_emit(cs, R_008490_CP_STRMOUT_CNTL >> 2); /* register */ + radeon_emit(cs, 0); + radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* reference value */ + radeon_emit(cs, S_008490_OFFSET_UPDATE_DONE(1)); /* mask */ + radeon_emit(cs, 4); /* poll interval */ +} + +static void evergreen_flush_vgt_streamout(struct r600_common_context *rctx) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + + r600_write_config_reg(cs, R_0084FC_CP_STRMOUT_CNTL, 0); + + radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); + radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0)); + + radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0)); + radeon_emit(cs, WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */ + radeon_emit(cs, R_0084FC_CP_STRMOUT_CNTL >> 2); /* register */ + radeon_emit(cs, 0); + radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */ + radeon_emit(cs, S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */ + radeon_emit(cs, 4); /* poll interval */ +} + +static void r600_set_streamout_enable(struct r600_common_context *rctx, unsigned buffer_enable_bit) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + + if (buffer_enable_bit) { + r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1)); + r600_write_context_reg(cs, R_028B20_VGT_STRMOUT_BUFFER_EN, buffer_enable_bit); + } else { + r600_write_context_reg(cs, R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(0)); + } +} + +static void evergreen_set_streamout_enable(struct r600_common_context *rctx, unsigned buffer_enable_bit) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + + if (buffer_enable_bit) { + r600_write_context_reg_seq(cs, R_028B94_VGT_STRMOUT_CONFIG, 2); + radeon_emit(cs, S_028B94_STREAMOUT_0_EN(1)); /* R_028B94_VGT_STRMOUT_CONFIG */ + radeon_emit(cs, S_028B98_STREAM_0_BUFFER_EN(buffer_enable_bit)); /* R_028B98_VGT_STRMOUT_BUFFER_CONFIG */ + } else { + r600_write_context_reg(cs, R_028B94_VGT_STRMOUT_CONFIG, S_028B94_STREAMOUT_0_EN(0)); + } +} + +static void r600_emit_reloc(struct r600_common_context *rctx, + struct r600_ring *ring, struct r600_resource *rbo, + enum radeon_bo_usage usage) +{ + struct radeon_winsys_cs *cs = ring->cs; + bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.r600_virtual_address; + unsigned reloc = r600_context_bo_reloc(rctx, ring, rbo, usage); + + if (!has_vm) { + radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); + radeon_emit(cs, reloc); + } +} + +static void r600_emit_streamout_begin(struct r600_common_context *rctx, struct r600_atom *atom) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct r600_so_target **t = rctx->streamout.targets; + unsigned *stride_in_dw = rctx->streamout.stride_in_dw; + unsigned i, update_flags = 0; + uint64_t va; + + if (rctx->chip_class >= EVERGREEN) { + evergreen_flush_vgt_streamout(rctx); + evergreen_set_streamout_enable(rctx, 
rctx->streamout.enabled_mask); + } else { + r600_flush_vgt_streamout(rctx); + r600_set_streamout_enable(rctx, rctx->streamout.enabled_mask); + } + + for (i = 0; i < rctx->streamout.num_targets; i++) { + if (!t[i]) + continue; + + t[i]->stride_in_dw = stride_in_dw[i]; + + va = r600_resource_va(rctx->b.screen, + (void*)t[i]->b.buffer); + + update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i); + + r600_write_context_reg_seq(cs, R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16*i, 3); + radeon_emit(cs, (t[i]->b.buffer_offset + + t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */ + radeon_emit(cs, stride_in_dw[i]); /* VTX_STRIDE (in DW) */ + radeon_emit(cs, va >> 8); /* BUFFER_BASE */ + + r600_emit_reloc(rctx, &rctx->rings.gfx, r600_resource(t[i]->b.buffer), + RADEON_USAGE_WRITE); + + /* R7xx requires this packet after updating BUFFER_BASE. + * Without this, R7xx locks up. */ + if (rctx->family >= CHIP_RS780 && rctx->family <= CHIP_RV740) { + radeon_emit(cs, PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0)); + radeon_emit(cs, i); + radeon_emit(cs, va >> 8); + + r600_emit_reloc(rctx, &rctx->rings.gfx, r600_resource(t[i]->b.buffer), + RADEON_USAGE_WRITE); + } + + if (rctx->streamout.append_bitmask & (1 << i)) { + va = r600_resource_va(rctx->b.screen, + (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset; + /* Append. */ + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, va); /* src address lo */ + radeon_emit(cs, va >> 32); /* src address hi */ + + r600_emit_reloc(rctx, &rctx->rings.gfx, t[i]->buf_filled_size, + RADEON_USAGE_READ); + } else { + /* Start from the beginning. 
*/ + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, t[i]->b.buffer_offset >> 2); /* buffer offset in DW */ + radeon_emit(cs, 0); /* unused */ + } + } + + if (rctx->family > CHIP_R600 && rctx->family < CHIP_RV770) { + radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); + radeon_emit(cs, update_flags); + } + rctx->streamout.begin_emitted = true; +} + +void r600_emit_streamout_end(struct r600_common_context *rctx) +{ + struct radeon_winsys_cs *cs = rctx->rings.gfx.cs; + struct r600_so_target **t = rctx->streamout.targets; + unsigned i; + uint64_t va; + + if (rctx->chip_class >= EVERGREEN) { + evergreen_flush_vgt_streamout(rctx); + } else { + r600_flush_vgt_streamout(rctx); + } + + for (i = 0; i < rctx->streamout.num_targets; i++) { + if (!t[i]) + continue; + + va = r600_resource_va(rctx->b.screen, + (void*)t[i]->buf_filled_size) + t[i]->buf_filled_size_offset; + radeon_emit(cs, PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0)); + radeon_emit(cs, STRMOUT_SELECT_BUFFER(i) | + STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) | + STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */ + radeon_emit(cs, va); /* dst address lo */ + radeon_emit(cs, va >> 32); /* dst address hi */ + radeon_emit(cs, 0); /* unused */ + radeon_emit(cs, 0); /* unused */ + + r600_emit_reloc(rctx, &rctx->rings.gfx, t[i]->buf_filled_size, + RADEON_USAGE_WRITE); + } + + if (rctx->chip_class >= EVERGREEN) { + evergreen_set_streamout_enable(rctx, 0); + } else { + r600_set_streamout_enable(rctx, 0); + } + + rctx->streamout.begin_emitted = false; + + if (rctx->chip_class >= R700) { + rctx->flags |= R600_CONTEXT_STREAMOUT_FLUSH; + } else { + rctx->flags |= R600_CONTEXT_FLUSH_AND_INV; + } +} + +void r600_streamout_init(struct r600_common_context *rctx) +{ + rctx->b.create_stream_output_target = r600_create_so_target; + rctx->b.stream_output_target_destroy = r600_so_target_destroy; + rctx->streamout.begin_atom.emit = r600_emit_streamout_begin; +} diff --git a/src/gallium/drivers/radeon/r600d_common.h b/src/gallium/drivers/radeon/r600d_common.h new file mode 100644 index 00000000000..b179757b424 --- /dev/null +++ b/src/gallium/drivers/radeon/r600d_common.h @@ -0,0 +1,143 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Marek Olšák + */ + +#ifndef R600D_COMMON_H +#define R600D_COMMON_H + +#define R600_CONFIG_REG_OFFSET 0x08000 +#define R600_CONTEXT_REG_OFFSET 0x28000 + +#define PKT_TYPE_S(x) (((x) & 0x3) << 30) +#define PKT_COUNT_S(x) (((x) & 0x3FFF) << 16) +#define PKT3_IT_OPCODE_S(x) (((x) & 0xFF) << 8) +#define PKT3_PREDICATE(x) (((x) >> 0) & 0x1) +#define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate)) + +#define RADEON_CP_PACKET3_COMPUTE_MODE 0x00000002 + +#define PKT3_NOP 0x10 +#define PKT3_STRMOUT_BUFFER_UPDATE 0x34 +#define STRMOUT_STORE_BUFFER_FILLED_SIZE 1 +#define STRMOUT_OFFSET_SOURCE(x) (((x) & 0x3) << 1) +#define STRMOUT_OFFSET_FROM_PACKET 0 +#define STRMOUT_OFFSET_FROM_VGT_FILLED_SIZE 1 +#define STRMOUT_OFFSET_FROM_MEM 2 +#define STRMOUT_OFFSET_NONE 3 +#define STRMOUT_SELECT_BUFFER(x) (((x) & 0x3) << 8) +#define PKT3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_EQUAL 3 +#define PKT3_EVENT_WRITE 0x46 +#define PKT3_SET_CONFIG_REG 0x68 +#define PKT3_SET_CONTEXT_REG 0x69 +#define PKT3_STRMOUT_BASE_UPDATE 0x72 /* r700 only */ +#define PKT3_SURFACE_BASE_UPDATE 0x73 /* r600 only */ +#define SURFACE_BASE_UPDATE_DEPTH (1 << 0) +#define SURFACE_BASE_UPDATE_COLOR(x) (2 << (x)) +#define SURFACE_BASE_UPDATE_COLOR_NUM(x) (((1 << x) - 1) << 1) +#define SURFACE_BASE_UPDATE_STRMOUT(x) (0x200 << (x)) + +#define EVENT_TYPE_PS_PARTIAL_FLUSH 0x10 +#define EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT 0x14 +#define EVENT_TYPE_ZPASS_DONE 0x15 +#define EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT 0x16 +#define EVENT_TYPE_PIPELINESTAT_START 25 +#define EVENT_TYPE_PIPELINESTAT_STOP 26 +#define EVENT_TYPE_SAMPLE_PIPELINESTAT 30 +#define EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH 0x1f +#define EVENT_TYPE_SAMPLE_STREAMOUTSTATS 0x20 +#define EVENT_TYPE_FLUSH_AND_INV_DB_META 0x2c /* supported on r700+ */ +#define EVENT_TYPE_FLUSH_AND_INV_CB_META 46 /* supported on r700+ */ +#define EVENT_TYPE(x) ((x) << 0) +#define EVENT_INDEX(x) ((x) << 8) + /* 0 - any non-TS event + * 1 - ZPASS_DONE + * 2 - SAMPLE_PIPELINESTAT + * 3 - SAMPLE_STREAMOUTSTAT* + * 4 - *S_PARTIAL_FLUSH + * 5 - TS events + */ + +/* R600-R700*/ +#define R_008490_CP_STRMOUT_CNTL 0x008490 +#define S_008490_OFFSET_UPDATE_DONE(x) (((x) & 0x1) << 0) +#define R_028AB0_VGT_STRMOUT_EN 0x028AB0 +#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0) +#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1) +#define C_028AB0_STREAMOUT 0xFFFFFFFE +#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20 +#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0) +#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1) +#define C_028B20_BUFFER_0_EN 0xFFFFFFFE +#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1) +#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1) +#define C_028B20_BUFFER_1_EN 0xFFFFFFFD +#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2) +#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1) +#define C_028B20_BUFFER_2_EN 0xFFFFFFFB +#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3) +#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1) +#define C_028B20_BUFFER_3_EN 0xFFFFFFF7 +#define R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 0x028AD0 + +/* EG+ */ +#define R_0084FC_CP_STRMOUT_CNTL 0x0084FC +#define S_0084FC_OFFSET_UPDATE_DONE(x) (((x) & 0x1) << 0) +#define R_028B94_VGT_STRMOUT_CONFIG 0x028B94 +#define S_028B94_STREAMOUT_0_EN(x) (((x) & 0x1) << 0) +#define G_028B94_STREAMOUT_0_EN(x) (((x) >> 0) & 0x1) +#define C_028B94_STREAMOUT_0_EN 0xFFFFFFFE +#define S_028B94_STREAMOUT_1_EN(x) (((x) & 0x1) << 1) +#define G_028B94_STREAMOUT_1_EN(x) (((x) >> 1) & 
0x1) +#define C_028B94_STREAMOUT_1_EN 0xFFFFFFFD +#define S_028B94_STREAMOUT_2_EN(x) (((x) & 0x1) << 2) +#define G_028B94_STREAMOUT_2_EN(x) (((x) >> 2) & 0x1) +#define C_028B94_STREAMOUT_2_EN 0xFFFFFFFB +#define S_028B94_STREAMOUT_3_EN(x) (((x) & 0x1) << 3) +#define G_028B94_STREAMOUT_3_EN(x) (((x) >> 3) & 0x1) +#define C_028B94_STREAMOUT_3_EN 0xFFFFFFF7 +#define S_028B94_RAST_STREAM(x) (((x) & 0x07) << 4) +#define G_028B94_RAST_STREAM(x) (((x) >> 4) & 0x07) +#define C_028B94_RAST_STREAM 0xFFFFFF8F +#define S_028B94_RAST_STREAM_MASK(x) (((x) & 0x0F) << 8) /* SI+ */ +#define G_028B94_RAST_STREAM_MASK(x) (((x) >> 8) & 0x0F) +#define C_028B94_RAST_STREAM_MASK 0xFFFFF0FF +#define S_028B94_USE_RAST_STREAM_MASK(x) (((x) & 0x1) << 31) /* SI+ */ +#define G_028B94_USE_RAST_STREAM_MASK(x) (((x) >> 31) & 0x1) +#define C_028B94_USE_RAST_STREAM_MASK 0x7FFFFFFF +#define R_028B98_VGT_STRMOUT_BUFFER_CONFIG 0x028B98 +#define S_028B98_STREAM_0_BUFFER_EN(x) (((x) & 0x0F) << 0) +#define G_028B98_STREAM_0_BUFFER_EN(x) (((x) >> 0) & 0x0F) +#define C_028B98_STREAM_0_BUFFER_EN 0xFFFFFFF0 +#define S_028B98_STREAM_1_BUFFER_EN(x) (((x) & 0x0F) << 4) +#define G_028B98_STREAM_1_BUFFER_EN(x) (((x) >> 4) & 0x0F) +#define C_028B98_STREAM_1_BUFFER_EN 0xFFFFFF0F +#define S_028B98_STREAM_2_BUFFER_EN(x) (((x) & 0x0F) << 8) +#define G_028B98_STREAM_2_BUFFER_EN(x) (((x) >> 8) & 0x0F) +#define C_028B98_STREAM_2_BUFFER_EN 0xFFFFF0FF +#define S_028B98_STREAM_3_BUFFER_EN(x) (((x) & 0x0F) << 12) +#define G_028B98_STREAM_3_BUFFER_EN(x) (((x) >> 12) & 0x0F) +#define C_028B98_STREAM_3_BUFFER_EN 0xFFFF0FFF + +#endif -- 2.30.2
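The inheritance chain described in the commit message (pipe_context -> r600_common_context -> r600_context) is plain C struct embedding: the base object is the first member, conventionally named "b", so a pointer to the derived struct and a pointer to its base share the same address and a cast converts between them. A trimmed sketch of the layout, with the struct bodies cut down to a couple of members for illustration only:

struct pipe_context { void *screen; /* ... */ };

struct r600_common_context {
	struct pipe_context b;        /* base class, must be the first member */
	unsigned flags;               /* flush flags */
};

struct r600_context {
	struct r600_common_context b; /* base class, must be the first member */
	/* r600g-specific state follows */
};

/* Downcast from the Gallium interface type to the common type. This is
 * valid because the base is the first member, so both structures start
 * at the same address. mark_flag() is a hypothetical helper, not part
 * of the patch. */
static void mark_flag(struct pipe_context *ctx, unsigned flag)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	rctx->flags |= flag;
}

This is why most of the churn in the diff is mechanical: rctx->family becomes rctx->b.family (one level up into the common context), and rctx->context becomes rctx->b.b (two levels up to the pipe_context).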
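All of the r600_write_*_reg() helpers added in r600_cs.h reduce to the same PKT3 encoding defined in r600d_common.h: one header dword carrying the packet type, opcode and data-dword count, then the register index in dwords relative to the block base, then the values. Below is a minimal, self-contained sketch of that encoding; the fixed-size cs[] array and the standalone radeon_emit()/write_context_reg() here are illustrative stand-ins for radeon_winsys_cs and the real helpers, not the driver API.

#include <stdio.h>
#include <stdint.h>

/* Packet macros as defined in r600d_common.h above. */
#define PKT_TYPE_S(x)              (((x) & 0x3) << 30)
#define PKT_COUNT_S(x)             (((x) & 0x3FFF) << 16)
#define PKT3_IT_OPCODE_S(x)        (((x) & 0xFF) << 8)
#define PKT3_PREDICATE(x)          (((x) >> 0) & 0x1)
#define PKT3(op, count, predicate) (PKT_TYPE_S(3) | PKT_COUNT_S(count) | \
                                    PKT3_IT_OPCODE_S(op) | PKT3_PREDICATE(predicate))

#define PKT3_SET_CONTEXT_REG       0x69
#define R600_CONTEXT_REG_OFFSET    0x28000
#define R_028AB0_VGT_STRMOUT_EN    0x028AB0
#define S_028AB0_STREAMOUT(x)      (((x) & 0x1) << 0)

static uint32_t cs[16]; /* toy command buffer */
static unsigned cdw;

static void radeon_emit(uint32_t value)
{
	cs[cdw++] = value;
}

/* Mirrors r600_write_context_reg(): header, register index, value. */
static void write_context_reg(unsigned reg, uint32_t value)
{
	radeon_emit(PKT3(PKT3_SET_CONTEXT_REG, 1, 0));     /* one data dword */
	radeon_emit((reg - R600_CONTEXT_REG_OFFSET) >> 2); /* index in dwords */
	radeon_emit(value);
}

int main(void)
{
	unsigned i;

	write_context_reg(R_028AB0_VGT_STRMOUT_EN, S_028AB0_STREAMOUT(1));
	for (i = 0; i < cdw; i++)
		printf("0x%08x\n", cs[i]); /* 0xc0016900, 0x000002ac, 0x00000001 */
	return 0;
}

The *_seq variants emit the same header with a larger count so that several consecutive register values can follow a single header, which is exactly how the BUFFER_SIZE/VTX_STRIDE/BUFFER_BASE triple is written in r600_emit_streamout_begin().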
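The flush_vgt_streamout() helpers show the synchronization idiom the streamout code relies on: clear CP_STRMOUT_CNTL, fire an SO_VGTSTREAMOUT_FLUSH event, then have the CP poll the register with WAIT_REG_MEM until the hardware raises OFFSET_UPDATE_DONE. Under the assumption that the six data dwords of WAIT_REG_MEM are function, poll address low, poll address high, reference value, mask, and poll interval (which matches the order the code emits them in), the packet built by r600_flush_vgt_streamout() decodes as:

/* WAIT_REG_MEM as emitted by r600_flush_vgt_streamout(). The count
 * field in the header is 5, i.e. the number of data dwords minus one.
 * Decoded here by hand for illustration. */
static const uint32_t wait_strmout_idle[] = {
	0xc0053c00,    /* PKT3(PKT3_WAIT_REG_MEM, 5, 0) */
	0x00000003,    /* WAIT_REG_MEM_EQUAL */
	0x008490 >> 2, /* R_008490_CP_STRMOUT_CNTL as a dword index */
	0x00000000,    /* address high, unused for register polls */
	0x00000001,    /* reference: S_008490_OFFSET_UPDATE_DONE(1) */
	0x00000001,    /* mask: only the DONE bit */
	0x00000004,    /* poll interval */
};

Evergreen parts run the same sequence against R_0084FC_CP_STRMOUT_CNTL, which is why the patch carries both an r600_ and an evergreen_ variant of the helper.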
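Each r600_atom declares in num_dw the worst-case number of dwords its emit callback writes, so the driver can reserve command-stream space before emitting. As a worked instance of the arithmetic in r600_streamout_buffers_dirty(), assume a CHIP_R600 context (so both the R7xx STRMOUT_BASE_UPDATE term and the R6xx SURFACE_BASE_UPDATE term drop out) with buffers 0 and 1 enabled and buffer 0 appending; bitcount() below is a portable stand-in for util_bitcount():

#include <stdio.h>

/* Portable stand-in for util_bitcount(). */
static unsigned bitcount(unsigned v)
{
	unsigned n = 0;
	for (; v; v &= v - 1)
		n++;
	return n;
}

int main(void)
{
	unsigned enabled_mask = 0x3;   /* buffers 0 and 1 */
	unsigned append_bitmask = 0x1; /* buffer 0 resumes from BUFFER_FILLED_SIZE */

	unsigned num_dw_for_end =
		12 +                         /* flush_vgt_streamout */
		bitcount(enabled_mask) * 8 + /* STRMOUT_BUFFER_UPDATE per buffer */
		3;                           /* set_streamout_enable(0) */

	unsigned begin_num_dw =
		12 +                         /* flush_vgt_streamout */
		6 +                          /* set_streamout_enable */
		bitcount(enabled_mask) * 7 + /* SET_CONTEXT_REG per buffer */
		bitcount(enabled_mask & append_bitmask) * 8 +  /* append */
		bitcount(enabled_mask & ~append_bitmask) * 6 + /* start at offset */
		num_dw_for_end;

	printf("end = %u dw, begin = %u dw\n", num_dw_for_end, begin_num_dw);
	/* prints: end = 31 dw, begin = 77 dw */
	return 0;
}

Folding num_dw_for_end into the begin atom's budget presumably guarantees that once streamout has begun there is always room left in the same command stream to end it without an intervening flush.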