#include <errno.h>
#include <unistd.h>
-/* Get backends mask */
-void r600_get_backend_mask(struct r600_context *ctx)
-{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- struct r600_resource *buffer;
- uint32_t *results;
- unsigned num_backends = ctx->screen->b.info.r600_num_backends;
- unsigned i, mask = 0;
- uint64_t va;
-
- /* if backend_map query is supported by the kernel */
- if (ctx->screen->b.info.r600_backend_map_valid) {
- unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
- unsigned backend_map = ctx->screen->b.info.r600_backend_map;
- unsigned item_width, item_mask;
-
- if (ctx->b.chip_class >= EVERGREEN) {
- item_width = 4;
- item_mask = 0x7;
- } else {
- item_width = 2;
- item_mask = 0x3;
- }
-
- while(num_tile_pipes--) {
- i = backend_map & item_mask;
- mask |= (1<<i);
- backend_map >>= item_width;
- }
- if (mask != 0) {
- ctx->backend_mask = mask;
- return;
- }
- }
-
- /* otherwise backup path for older kernels */
-
- /* create buffer for event data */
- buffer = (struct r600_resource*)
- pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
- PIPE_USAGE_STAGING, ctx->max_db*16);
- if (!buffer)
- goto err;
- va = r600_resource_va(&ctx->screen->b.b, (void*)buffer);
-
- /* initialize buffer with zeroes */
- results = r600_buffer_map_sync_with_rings(&ctx->b, buffer, PIPE_TRANSFER_WRITE);
- if (results) {
- memset(results, 0, ctx->max_db * 4 * 4);
- ctx->b.ws->buffer_unmap(buffer->cs_buf);
-
- /* emit EVENT_WRITE for ZPASS_DONE */
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
- cs->buf[cs->cdw++] = va;
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
-
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);
-
- /* analyze results */
- results = r600_buffer_map_sync_with_rings(&ctx->b, buffer, PIPE_TRANSFER_READ);
- if (results) {
- for(i = 0; i < ctx->max_db; i++) {
- /* at least highest bit will be set if backend is used */
- if (results[i*4 + 1])
- mask |= (1<<i);
- }
- ctx->b.ws->buffer_unmap(buffer->cs_buf);
- }
- }
-
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
-
- if (mask != 0) {
- ctx->backend_mask = mask;
- return;
- }
-
-err:
- /* fallback to old method - set num_backends lower bits to 1 */
- ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
- return;
-}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
boolean count_draw_in)
{
+ struct radeon_winsys_cs *dma = ctx->b.rings.dma.cs;
+
+ /* Flush the DMA IB if it's not empty. */
+ if (dma && dma->cdw)
+ ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+
if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
/* all will be accounted for once relocations are emitted */
num_dw += ctx->b.rings.gfx.cs->cdw;
if (count_draw_in) {
- unsigned i;
+ uint64_t mask;
/* The number of dwords all the dirty states would take. */
- for (i = 0; i < R600_NUM_ATOMS; i++) {
- if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
- num_dw += ctx->atoms[i]->num_dw;
- if (ctx->screen->trace_bo) {
- num_dw += R600_TRACE_CS_DWORDS;
- }
+ mask = ctx->dirty_atoms;
+ while (mask != 0) {
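+ /* u_bit_scan64 returns the index of the lowest set bit and clears it,
+ * so each dirty atom is counted exactly once. */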
+ num_dw += ctx->atoms[u_bit_scan64(&mask)]->num_dw;
+ if (ctx->screen->b.trace_bo) {
+ num_dw += R600_TRACE_CS_DWORDS;
}
}
/* The upper-bound of how much space a draw command would take. */
num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
- if (ctx->screen->trace_bo) {
+ if (ctx->screen->b.trace_bo) {
num_dw += R600_TRACE_CS_DWORDS;
}
}
/* Count in queries_suspend. */
- num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
+ num_dw += ctx->b.num_cs_dw_nontimer_queries_suspend +
+ ctx->b.num_cs_dw_timer_queries_suspend;
/* Count in streamout_end at the end of CS. */
if (ctx->b.streamout.begin_emitted) {
}
/* Count in render_condition(NULL) at the end of CS. */
- if (ctx->predicate_drawing) {
+ if (ctx->b.predicate_drawing) {
num_dw += 3;
}
/* SX_MISC */
- if (ctx->b.chip_class <= R700) {
+ if (ctx->b.chip_class == R600) {
num_dw += 3;
}
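/* presumably space for the fence emitted at the end of the CS */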
num_dw += 10;
/* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ if (num_dw > ctx->b.rings.gfx.cs->max_dw) {
+ ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
}
cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
}
- if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV) {
+ if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV ||
+ (rctx->b.chip_class == R600 && rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
}
S_0085F0_CB11_DEST_BASE_ENA(1);
}
- if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
+ if (rctx->b.chip_class >= R700 &&
+ rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
S_0085F0_SO1_DEST_BASE_ENA(1) |
S_0085F0_SO2_DEST_BASE_ENA(1) |
S_0085F0_SMX_ACTION_ENA(1);
}
- if (cp_coher_cntl && !rctx->skip_surface_sync_on_next_cs_flush) {
+ /* Workaround for buggy flushing on some R6xx chipsets. */
+ if ((rctx->b.flags & (R600_CONTEXT_FLUSH_AND_INV |
+ R600_CONTEXT_STREAMOUT_FLUSH)) &&
+ (rctx->b.family == CHIP_RV670 ||
+ rctx->b.family == CHIP_RS780 ||
+ rctx->b.family == CHIP_RS880)) {
+ cp_coher_cntl |= S_0085F0_CB1_DEST_BASE_ENA(1) |
+ S_0085F0_DEST_BASE_0_ENA(1);
+ }
+
+ if (cp_coher_cntl) {
cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
cs->buf[cs->cdw++] = 0xffffffff; /* CP_COHER_SIZE */
/* Use of WAIT_UNTIL is deprecated on Cayman+ */
if (rctx->b.family < CHIP_CAYMAN) {
/* wait for things to settle */
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
}
}
rctx->b.flags = 0;
}
-void r600_context_flush(struct r600_context *ctx, unsigned flags)
+void r600_context_gfx_flush(void *context, unsigned flags,
+ struct pipe_fence_handle **fence)
{
+ struct r600_context *ctx = context;
struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- ctx->nontimer_queries_suspended = false;
- ctx->b.streamout.suspended = false;
-
- /* suspend queries */
- if (ctx->num_cs_dw_nontimer_queries_suspend) {
- r600_suspend_nontimer_queries(ctx);
- ctx->nontimer_queries_suspended = true;
- }
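+ /* Nothing besides the initial state has been emitted and no fence was
+ * requested, so there is nothing to flush. */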
+ if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
+ return;
- if (ctx->b.streamout.begin_emitted) {
- r600_emit_streamout_end(&ctx->b);
- ctx->b.streamout.suspended = true;
- }
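+ /* Suspend queries and streamout (previously done inline above); they
+ * are resumed from r600_begin_new_cs. */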
+ r600_preflush_suspend_features(&ctx->b);
/* flush the framebuffer cache */
ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
r600_flush_emit(ctx);
/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
- if (ctx->b.chip_class <= R700) {
- r600_write_context_reg(cs, R_028350_SX_MISC, 0);
+ if (ctx->b.chip_class == R600) {
+ radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
}
/* force to keep tiling flags */
}
/* Flush the CS. */
- ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
+ ctx->b.ws->cs_flush(cs, flags, fence, ctx->screen->b.cs_count++);
- ctx->skip_surface_sync_on_next_cs_flush = false;
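+ /* Start a new CS right away so the initial state and dirty atoms are
+ * re-emitted for the next draw. */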
+ r600_begin_new_cs(ctx);
}
void r600_begin_new_cs(struct r600_context *ctx)
r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);
/* Re-emit states. */
- ctx->alphatest_state.atom.dirty = true;
- ctx->blend_color.atom.dirty = true;
- ctx->cb_misc_state.atom.dirty = true;
- ctx->clip_misc_state.atom.dirty = true;
- ctx->clip_state.atom.dirty = true;
- ctx->db_misc_state.atom.dirty = true;
- ctx->db_state.atom.dirty = true;
- ctx->framebuffer.atom.dirty = true;
- ctx->pixel_shader.atom.dirty = true;
- ctx->poly_offset_state.atom.dirty = true;
- ctx->vgt_state.atom.dirty = true;
- ctx->sample_mask.atom.dirty = true;
- ctx->scissor.atom.dirty = true;
- ctx->config_state.atom.dirty = true;
- ctx->stencil_ref.atom.dirty = true;
- ctx->vertex_fetch_shader.atom.dirty = true;
- ctx->vertex_shader.atom.dirty = true;
- ctx->viewport.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
+ r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
+ r600_mark_atom_dirty(ctx, &ctx->pixel_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
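+ /* Re-emit every viewport and scissor: 4 dwords per scissor, 8 per viewport. */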
+ ctx->scissor.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->scissor.atom.num_dw = R600_MAX_VIEWPORTS * 4;
+ r600_mark_atom_dirty(ctx, &ctx->scissor.atom);
+ ctx->viewport.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->viewport.atom.num_dw = R600_MAX_VIEWPORTS * 8;
+ r600_mark_atom_dirty(ctx, &ctx->viewport.atom);
+ if (ctx->b.chip_class < EVERGREEN) {
+ r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
+ }
+ r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
+ r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->export_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
+ if (ctx->gs_shader) {
+ r600_mark_atom_dirty(ctx, &ctx->geometry_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
+ }
+ r600_mark_atom_dirty(ctx, &ctx->vertex_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
if (ctx->blend_state.cso)
- ctx->blend_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
if (ctx->dsa_state.cso)
- ctx->dsa_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
if (ctx->rasterizer_state.cso)
- ctx->rasterizer_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);
if (ctx->b.chip_class <= R700) {
- ctx->seamless_cube_map.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
}
ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
r600_sampler_states_dirty(ctx, &samplers->states);
}
- if (ctx->b.streamout.suspended) {
- ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
- r600_streamout_buffers_dirty(&ctx->b);
- }
-
- /* resume queries */
- if (ctx->nontimer_queries_suspended) {
- r600_resume_nontimer_queries(ctx);
- }
+ r600_postflush_resume_features(&ctx->b);
/* Re-emit the draw state. */
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;
- ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
+ ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
}
/* The max number of bytes to copy per packet. */
assert(size);
assert(rctx->screen->b.has_cp_dma);
- dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
- src_offset += r600_resource_va(&rctx->screen->b.b, src);
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
+
+ dst_offset += r600_resource(dst)->gpu_address;
+ src_offset += r600_resource(src)->gpu_address;
/* Flush the caches where the resources are bound. */
rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
}
/* This must be done after r600_need_cs_space. */
- src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
- dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);
+ src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src,
+ RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
+ dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst,
+ RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
radeon_emit(cs, src_offset); /* SRC_ADDR_LO [31:0] */
rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
R600_CONTEXT_INV_VERTEX_CACHE |
R600_CONTEXT_INV_TEX_CACHE;
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
-}
-
-void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
-{
- /* The number of dwords we already used in the DMA so far. */
- num_dw += ctx->b.rings.dma.cs->cdw;
- /* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
- }
}
-void r600_dma_copy(struct r600_context *rctx,
- struct pipe_resource *dst,
- struct pipe_resource *src,
- uint64_t dst_offset,
- uint64_t src_offset,
- uint64_t size)
+void r600_dma_copy_buffer(struct r600_context *rctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src,
+ uint64_t dst_offset,
+ uint64_t src_offset,
+ uint64_t size)
{
struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
- unsigned i, ncopy, csize, shift;
+ unsigned i, ncopy, csize;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
- /* make sure that the dma ring is only one active */
- rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
+ /* Mark the buffer range of the destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&rdst->valid_buffer_range, dst_offset,
+ dst_offset + size);
- size >>= 2;
- shift = 2;
- ncopy = (size / 0xffff) + !!(size % 0xffff);
+ size >>= 2; /* convert to dwords */
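+ /* A single DMA_PACKET_COPY moves at most R600_DMA_COPY_MAX_SIZE_DW dwords,
+ * so split the copy into that many chunks (rounded up). */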
+ ncopy = (size / R600_DMA_COPY_MAX_SIZE_DW) + !!(size % R600_DMA_COPY_MAX_SIZE_DW);
- r600_need_dma_space(rctx, ncopy * 5);
+ r600_need_dma_space(&rctx->b, ncopy * 5);
for (i = 0; i < ncopy; i++) {
- csize = size < 0xffff ? size : 0xffff;
- /* emit reloc before writting cs so that cs is always in consistent state */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE);
+ csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
+ /* emit relocs before writing the CS so that it is always in a consistent state */
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ,
+ RADEON_PRIO_SDMA_BUFFER);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE,
+ RADEON_PRIO_SDMA_BUFFER);
cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
- dst_offset += csize << shift;
- src_offset += csize << shift;
+ dst_offset += csize << 2;
+ src_offset += csize << 2;
size -= csize;
}
-
- util_range_add(&rdst->valid_buffer_range, dst_offset,
- dst_offset + size);
}