#include <errno.h>
#include <unistd.h>
-/* Get backends mask */
-void r600_get_backend_mask(struct r600_context *ctx)
-{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- struct r600_resource *buffer;
- uint32_t *results;
- unsigned num_backends = ctx->screen->b.info.r600_num_backends;
- unsigned i, mask = 0;
- uint64_t va;
-
- /* if backend_map query is supported by the kernel */
- if (ctx->screen->b.info.r600_backend_map_valid) {
- unsigned num_tile_pipes = ctx->screen->b.info.r600_num_tile_pipes;
- unsigned backend_map = ctx->screen->b.info.r600_backend_map;
- unsigned item_width, item_mask;
-
- if (ctx->b.chip_class >= EVERGREEN) {
- item_width = 4;
- item_mask = 0x7;
- } else {
- item_width = 2;
- item_mask = 0x3;
- }
-
- while(num_tile_pipes--) {
- i = backend_map & item_mask;
- mask |= (1<<i);
- backend_map >>= item_width;
- }
- if (mask != 0) {
- ctx->backend_mask = mask;
- return;
- }
- }
-
- /* otherwise backup path for older kernels */
-
- /* create buffer for event data */
- buffer = (struct r600_resource*)
- pipe_buffer_create(&ctx->screen->b.b, PIPE_BIND_CUSTOM,
- PIPE_USAGE_STAGING, ctx->max_db*16);
- if (!buffer)
- goto err;
- va = r600_resource_va(&ctx->screen->b.b, (void*)buffer);
-
- /* initialize buffer with zeroes */
- results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
- if (results) {
- memset(results, 0, ctx->max_db * 4 * 4);
- ctx->b.ws->buffer_unmap(buffer->cs_buf);
-
- /* emit EVENT_WRITE for ZPASS_DONE */
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
- cs->buf[cs->cdw++] = va;
- cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;
-
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, buffer, RADEON_USAGE_WRITE);
-
- /* analyze results */
- results = r600_buffer_mmap_sync_with_rings(ctx, buffer, PIPE_TRANSFER_READ);
- if (results) {
- for(i = 0; i < ctx->max_db; i++) {
- /* at least highest bit will be set if backend is used */
- if (results[i*4 + 1])
- mask |= (1<<i);
- }
- ctx->b.ws->buffer_unmap(buffer->cs_buf);
- }
- }
-
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
-
- if (mask != 0) {
- ctx->backend_mask = mask;
- return;
- }
-
-err:
- /* fallback to old method - set num_backends lower bits to 1 */
- ctx->backend_mask = (~((uint32_t)0))>>(32-num_backends);
- return;
-}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
boolean count_draw_in)
{
- if (!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
+ /* Flush the DMA IB if it's not empty. */
+ if (radeon_emitted(ctx->b.dma.cs, 0))
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+
+ if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
+ ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
 /* all will be accounted once relocations are emitted */
ctx->b.gtt = 0;
ctx->b.vram = 0;
- /* The number of dwords we already used in the CS so far. */
- num_dw += ctx->b.rings.gfx.cs->cdw;
-
+ /* Check available space in CS. */
if (count_draw_in) {
- unsigned i;
+ uint64_t mask;
/* The number of dwords all the dirty states would take. */
- for (i = 0; i < R600_NUM_ATOMS; i++) {
- if (ctx->atoms[i] && ctx->atoms[i]->dirty) {
- num_dw += ctx->atoms[i]->num_dw;
- if (ctx->screen->trace_bo) {
- num_dw += R600_TRACE_CS_DWORDS;
- }
- }
- }
+ mask = ctx->dirty_atoms;
+ while (mask != 0)
+ num_dw += ctx->atoms[u_bit_scan64(&mask)]->num_dw;
/* The upper-bound of how much space a draw command would take. */
num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
- if (ctx->screen->trace_bo) {
- num_dw += R600_TRACE_CS_DWORDS;
- }
}
- /* Count in queries_suspend. */
- num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
+ /* Count in r600_suspend_queries. */
+ num_dw += ctx->b.num_cs_dw_queries_suspend;
/* Count in streamout_end at the end of CS. */
if (ctx->b.streamout.begin_emitted) {
num_dw += ctx->b.streamout.num_dw_for_end;
}
- /* Count in render_condition(NULL) at the end of CS. */
- if (ctx->predicate_drawing) {
- num_dw += 3;
- }
-
/* SX_MISC */
- if (ctx->b.chip_class <= R700) {
+ if (ctx->b.chip_class == R600) {
num_dw += 3;
}
num_dw += 10;
/* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC);
+ if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
}
void r600_flush_emit(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
unsigned cp_coher_cntl = 0;
unsigned wait_until = 0;
return;
}
+ /* Ensure coherency between streamout and shaders. */
+ if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)
+ rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER);
+
if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
wait_until |= S_008040_WAIT_3D_IDLE(1);
}
}
}
+ /* Wait packets must be executed first, because SURFACE_SYNC doesn't
+ * wait for shaders if it's not flushing CB or DB.
+ */
if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
+ if (wait_until) {
+ /* Use of WAIT_UNTIL is deprecated on Cayman+ */
+ if (rctx->b.family < CHIP_CAYMAN) {
+ /* wait for things to settle */
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
+ }
}
if (rctx->b.chip_class >= R700 &&
(rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
if (rctx->b.chip_class >= R700 &&
(rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
/* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
*
cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
}
- if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
+ if (rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV ||
+ (rctx->b.chip_class == R600 && rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
}
if (rctx->b.flags & R600_CONTEXT_INV_CONST_CACHE) {
: S_0085F0_TC_ACTION_ENA(1);
}
if (rctx->b.flags & R600_CONTEXT_INV_TEX_CACHE) {
- cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
+ /* Textures use the texture cache.
+ * Texture buffer objects use the vertex cache. */
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1) |
+ (rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1) : 0);
}
/* Don't use the DB CP COHER logic on r6xx.
S_0085F0_CB11_DEST_BASE_ENA(1);
}
- if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
+ if (rctx->b.chip_class >= R700 &&
+ rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH) {
cp_coher_cntl |= S_0085F0_SO0_DEST_BASE_ENA(1) |
S_0085F0_SO1_DEST_BASE_ENA(1) |
S_0085F0_SO2_DEST_BASE_ENA(1) |
S_0085F0_SMX_ACTION_ENA(1);
}
+ /* Workaround for buggy flushing on some R6xx chipsets. */
+ if ((rctx->b.flags & (R600_CONTEXT_FLUSH_AND_INV |
+ R600_CONTEXT_STREAMOUT_FLUSH)) &&
+ (rctx->b.family == CHIP_RV670 ||
+ rctx->b.family == CHIP_RS780 ||
+ rctx->b.family == CHIP_RS880)) {
+ cp_coher_cntl |= S_0085F0_CB1_DEST_BASE_ENA(1) |
+ S_0085F0_DEST_BASE_0_ENA(1);
+ }
+
if (cp_coher_cntl) {
- cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
- cs->buf[cs->cdw++] = cp_coher_cntl; /* CP_COHER_CNTL */
- cs->buf[cs->cdw++] = 0xffffffff; /* CP_COHER_SIZE */
- cs->buf[cs->cdw++] = 0; /* CP_COHER_BASE */
- cs->buf[cs->cdw++] = 0x0000000A; /* POLL_INTERVAL */
+ radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
+ radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
+ radeon_emit(cs, 0xffffffff); /* CP_COHER_SIZE */
+ radeon_emit(cs, 0); /* CP_COHER_BASE */
+ radeon_emit(cs, 0x0000000A); /* POLL_INTERVAL */
}
- if (wait_until) {
- /* Use of WAIT_UNTIL is deprecated on Cayman+ */
- if (rctx->b.family < CHIP_CAYMAN) {
- /* wait for things to settle */
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
- }
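+ /* Start or stop pipeline statistics collection if requested via the context flags. */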
+ if (rctx->b.flags & R600_CONTEXT_START_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) |
+ EVENT_INDEX(0));
+ } else if (rctx->b.flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_STOP) |
+ EVENT_INDEX(0));
}
/* everything is properly flushed */
rctx->b.flags = 0;
}
-void r600_context_flush(struct r600_context *ctx, unsigned flags)
+void r600_context_gfx_flush(void *context, unsigned flags,
+ struct pipe_fence_handle **fence)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct r600_context *ctx = context;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys *ws = ctx->b.ws;
- ctx->nontimer_queries_suspended = false;
- ctx->b.streamout.suspended = false;
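+ /* Nothing to submit if the CS contains only its initial state. */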
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
+ return;
- /* suspend queries */
- if (ctx->num_cs_dw_nontimer_queries_suspend) {
- r600_suspend_nontimer_queries(ctx);
- ctx->nontimer_queries_suspended = true;
- }
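+ /* Skip the submission if a device reset has been detected. */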
+ if (r600_check_device_reset(&ctx->b))
+ return;
- if (ctx->b.streamout.begin_emitted) {
- r600_emit_streamout_end(&ctx->b);
- ctx->b.streamout.suspended = true;
- }
+ r600_preflush_suspend_features(&ctx->b);
- /* flush is needed to avoid lockups on some chips with user fences
- * this will also flush the framebuffer cache
- */
+ /* flush the framebuffer cache */
ctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
r600_flush_emit(ctx);
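+ /* Emit a final trace point before flushing (debug contexts only). */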
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
- if (ctx->b.chip_class <= R700) {
- r600_write_context_reg(cs, R_028350_SX_MISC, 0);
+ if (ctx->b.chip_class == R600) {
+ radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
}
- /* force to keep tiling flags */
- if (ctx->keep_tiling_flags) {
- flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
+ if (ctx->is_debug) {
+ /* Save the IB for debug contexts. */
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx);
+ r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
+ r600_resource_reference(&ctx->trace_buf, NULL);
}
-
/* Flush the CS. */
- ctx->b.ws->cs_flush(ctx->b.rings.gfx.cs, flags, ctx->screen->cs_count++);
+ ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
+ if (fence)
+ ws->fence_reference(fence, ctx->b.last_gfx_fence);
+ ctx->b.num_gfx_cs_flushes++;
+
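+ /* For debug contexts, wait for the submission to finish; if it appears hung, dump the debug state to the file named by R600_TRACE. */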
+ if (ctx->is_debug) {
+ if (!ws->fence_wait(ws, ctx->b.last_gfx_fence, 10000000)) {
+ const char *fname = getenv("R600_TRACE");
+ if (!fname)
+ exit(-1);
+ FILE *fl = fopen(fname, "w+");
+ if (fl) {
+ eg_dump_debug_state(&ctx->b.b, fl, 0);
+ fclose(fl);
+ } else
+ perror(fname);
+ exit(-1);
+ }
+ }
+ r600_begin_new_cs(ctx);
}
void r600_begin_new_cs(struct r600_context *ctx)
{
unsigned shader;
+ if (ctx->is_debug) {
+ uint32_t zero = 0;
+
+ /* Create a buffer used for writing trace IDs and initialize it to 0. */
+ assert(!ctx->trace_buf);
+ ctx->trace_buf = (struct r600_resource*)
+ pipe_buffer_create(ctx->b.b.screen, 0,
+ PIPE_USAGE_STAGING, 4);
+ if (ctx->trace_buf)
+ pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
+ 0, sizeof(zero), &zero);
+ ctx->trace_id = 0;
+ }
+
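+ /* Emit a trace point at the start of the new CS (debug contexts only). */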
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
+
ctx->b.flags = 0;
ctx->b.gtt = 0;
ctx->b.vram = 0;
/* Begin a new CS. */
- r600_emit_command_buffer(ctx->b.rings.gfx.cs, &ctx->start_cs_cmd);
+ r600_emit_command_buffer(ctx->b.gfx.cs, &ctx->start_cs_cmd);
/* Re-emit states. */
- ctx->alphatest_state.atom.dirty = true;
- ctx->blend_color.atom.dirty = true;
- ctx->cb_misc_state.atom.dirty = true;
- ctx->clip_misc_state.atom.dirty = true;
- ctx->clip_state.atom.dirty = true;
- ctx->db_misc_state.atom.dirty = true;
- ctx->db_state.atom.dirty = true;
- ctx->framebuffer.atom.dirty = true;
- ctx->pixel_shader.atom.dirty = true;
- ctx->poly_offset_state.atom.dirty = true;
- ctx->vgt_state.atom.dirty = true;
- ctx->sample_mask.atom.dirty = true;
- ctx->scissor.atom.dirty = true;
- ctx->config_state.atom.dirty = true;
- ctx->stencil_ref.atom.dirty = true;
- ctx->vertex_fetch_shader.atom.dirty = true;
- ctx->vertex_shader.atom.dirty = true;
- ctx->viewport.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->alphatest_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->blend_color.atom);
+ r600_mark_atom_dirty(ctx, &ctx->cb_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->clip_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->clip_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
+ r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
+ r600_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
+ ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ r600_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
+ ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ r600_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
+ if (ctx->b.chip_class <= EVERGREEN) {
+ r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
+ }
+ r600_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
+ r600_mark_atom_dirty(ctx, &ctx->vertex_fetch_shader.atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_ES].atom);
+ r600_mark_atom_dirty(ctx, &ctx->shader_stages.atom);
+ if (ctx->gs_shader) {
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_GS].atom);
+ r600_mark_atom_dirty(ctx, &ctx->gs_rings.atom);
+ }
+ if (ctx->tes_shader) {
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_HS].atom);
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[EG_HW_STAGE_LS].atom);
+ }
+ r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_VS].atom);
+ r600_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+ r600_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
if (ctx->blend_state.cso)
- ctx->blend_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->blend_state.atom);
if (ctx->dsa_state.cso)
- ctx->dsa_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->dsa_state.atom);
if (ctx->rasterizer_state.cso)
- ctx->rasterizer_state.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->rasterizer_state.atom);
if (ctx->b.chip_class <= R700) {
- ctx->seamless_cube_map.atom.dirty = true;
+ r600_mark_atom_dirty(ctx, &ctx->seamless_cube_map.atom);
}
ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
r600_sampler_states_dirty(ctx, &samplers->states);
}
- if (ctx->b.streamout.suspended) {
- ctx->b.streamout.append_bitmask = ctx->b.streamout.enabled_mask;
- r600_streamout_buffers_dirty(&ctx->b);
- }
-
- /* resume queries */
- if (ctx->nontimer_queries_suspended) {
- r600_resume_nontimer_queries(ctx);
- }
+ r600_postflush_resume_features(&ctx->b);
/* Re-emit the draw state. */
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;
+ ctx->last_rast_prim = -1;
+ ctx->current_rast_prim = -1;
- ctx->initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
+ assert(!ctx->b.gfx.cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
}
-void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
+void r600_emit_pfp_sync_me(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
- uint64_t va;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
- r600_need_cs_space(ctx, 10, FALSE);
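+ /* A real PFP_SYNC_ME packet needs Evergreen+ and a new enough kernel (drm_minor >= 46); otherwise emulate it below. */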
+ if (rctx->b.chip_class >= EVERGREEN &&
+ rctx->b.screen->info.drm_minor >= 46) {
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ } else {
+ /* Emulate PFP_SYNC_ME by writing a value to memory in ME and
+ * waiting for it in PFP.
+ */
+ struct r600_resource *buf = NULL;
+ unsigned offset, reloc;
+ uint64_t va;
+
+ /* 16-byte address alignment is required by WAIT_REG_MEM. */
+ u_suballocator_alloc(rctx->b.allocator_zeroed_memory, 4, 16,
+ &offset, (struct pipe_resource**)&buf);
+ if (!buf) {
+ /* This is too heavyweight, but will work. */
+ rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ return;
+ }
- va = r600_resource_va(&ctx->screen->b.b, (void*)fence_bo);
- va = va + (offset << 2);
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, buf,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_FENCE);
- /* Use of WAIT_UNTIL is deprecated on Cayman+ */
- if (ctx->b.family >= CHIP_CAYMAN) {
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);
- } else {
- r600_write_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1));
- }
-
- cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
- cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
- cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* ADDRESS_LO */
- /* DATA_SEL | INT_EN | ADDRESS_HI */
- cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
- cs->buf[cs->cdw++] = value; /* DATA_LO */
- cs->buf[cs->cdw++] = 0; /* DATA_HI */
- cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
- cs->buf[cs->cdw++] = r600_context_bo_reloc(&ctx->b, &ctx->b.rings.gfx, fence_bo, RADEON_USAGE_WRITE);
+ va = buf->gpu_address + offset;
+ assert(va % 16 == 0);
+
+ /* Write 1 to memory in ME. */
+ radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, ((va >> 32) & 0xff) | MEM_WRITE_32_BITS);
+ radeon_emit(cs, 1);
+ radeon_emit(cs, 0);
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ /* Wait in PFP (PFP can only do GEQUAL against memory). */
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_GEQUAL |
+ WAIT_REG_MEM_MEMORY |
+ WAIT_REG_MEM_PFP);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, 1); /* reference value */
+ radeon_emit(cs, 0xffffffff); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ r600_resource_reference(&buf, NULL);
+ }
}
/* The max number of bytes to copy per packet. */
struct pipe_resource *src, uint64_t src_offset,
unsigned size)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
assert(size);
- assert(rctx->screen->has_cp_dma);
+ assert(rctx->screen->b.has_cp_dma);
+
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
+ util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
+ dst_offset + size);
- dst_offset += r600_resource_va(&rctx->screen->b.b, dst);
- src_offset += r600_resource_va(&rctx->screen->b.b, src);
+ dst_offset += r600_resource(dst)->gpu_address;
+ src_offset += r600_resource(src)->gpu_address;
/* Flush the caches where the resources are bound. */
- r600_flag_resource_cache_flush(rctx, src);
- r600_flag_resource_cache_flush(rctx, dst);
- rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
+ rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER) |
+ R600_CONTEXT_WAIT_3D_IDLE;
/* There are differences between R700 and EG in CP DMA,
* but we only use the common bits here. */
unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
unsigned src_reloc, dst_reloc;
- r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
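+ /* Also reserve space for the trailing WAIT_UNTIL (R600 only) and PFP_SYNC_ME. */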
+ r600_need_cs_space(rctx,
+ 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
+ 3 + R600_MAX_PFP_SYNC_ME_DWORDS, FALSE);
/* Flush the caches for the first copy only. */
if (rctx->b.flags) {
}
/* This must be done after r600_need_cs_space. */
- src_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)src, RADEON_USAGE_READ);
- dst_reloc = r600_context_bo_reloc(&rctx->b, &rctx->b.rings.gfx, (struct r600_resource*)dst, RADEON_USAGE_WRITE);
+ src_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)src,
+ RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
+ dst_reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, (struct r600_resource*)dst,
+ RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
radeon_emit(cs, src_offset); /* SRC_ADDR_LO [31:0] */
dst_offset += byte_count;
}
- /* Flush the cache of the dst resource again in case the 3D engine
- * has been prefetching it. */
- r600_flag_resource_cache_flush(rctx, dst);
-
- util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
- dst_offset + size);
-}
+ /* CP_DMA_CP_SYNC doesn't wait for idle on R6xx, but this does. */
+ if (rctx->b.chip_class == R600)
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL,
+ S_008040_WAIT_CP_DMA_IDLE(1));
-void r600_need_dma_space(struct r600_context *ctx, unsigned num_dw)
-{
- /* The number of dwords we already used in the DMA so far. */
- num_dw += ctx->b.rings.dma.cs->cdw;
- /* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.dma.flush(ctx, RADEON_FLUSH_ASYNC);
- }
+ /* CP DMA is executed in ME, but index buffers are read by PFP.
+ * This ensures that ME (CP DMA) is idle before PFP starts fetching
+ * indices. If we wanted to execute CP DMA in PFP, this packet
+ * should precede it.
+ */
+ r600_emit_pfp_sync_me(rctx);
}
-void r600_dma_copy(struct r600_context *rctx,
- struct pipe_resource *dst,
- struct pipe_resource *src,
- uint64_t dst_offset,
- uint64_t src_offset,
- uint64_t size)
+void r600_dma_copy_buffer(struct r600_context *rctx,
+ struct pipe_resource *dst,
+ struct pipe_resource *src,
+ uint64_t dst_offset,
+ uint64_t src_offset,
+ uint64_t size)
{
- struct radeon_winsys_cs *cs = rctx->b.rings.dma.cs;
- unsigned i, ncopy, csize, shift;
+ struct radeon_winsys_cs *cs = rctx->b.dma.cs;
+ unsigned i, ncopy, csize;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
- /* make sure that the dma ring is only one active */
- rctx->b.rings.gfx.flush(rctx, RADEON_FLUSH_ASYNC);
-
- size >>= 2;
- shift = 2;
- ncopy = (size / 0xffff) + !!(size % 0xffff);
-
- r600_need_dma_space(rctx, ncopy * 5);
- for (i = 0; i < ncopy; i++) {
- csize = size < 0xffff ? size : 0xffff;
- /* emit reloc before writting cs so that cs is always in consistent state */
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rsrc, RADEON_USAGE_READ);
- r600_context_bo_reloc(&rctx->b, &rctx->b.rings.dma, rdst, RADEON_USAGE_WRITE);
- cs->buf[cs->cdw++] = DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize);
- cs->buf[cs->cdw++] = dst_offset & 0xfffffffc;
- cs->buf[cs->cdw++] = src_offset & 0xfffffffc;
- cs->buf[cs->cdw++] = (dst_offset >> 32UL) & 0xff;
- cs->buf[cs->cdw++] = (src_offset >> 32UL) & 0xff;
- dst_offset += csize << shift;
- src_offset += csize << shift;
- size -= csize;
- }
-
+ /* Mark the buffer range of destination as valid (initialized),
+ * so that transfer_map knows it should wait for the GPU when mapping
+ * that range. */
util_range_add(&rdst->valid_buffer_range, dst_offset,
dst_offset + size);
-}
-
-/* Flag the cache of the resource for it to be flushed later if the resource
- * is bound. Otherwise do nothing. Used for synchronization between engines.
- */
-void r600_flag_resource_cache_flush(struct r600_context *rctx,
- struct pipe_resource *res)
-{
- /* Check vertex buffers. */
- uint32_t mask = rctx->vertex_buffer_state.enabled_mask;
- while (mask) {
- uint32_t i = u_bit_scan(&mask);
- if (rctx->vertex_buffer_state.vb[i].buffer == res) {
- rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
- }
- }
- /* Check vertex buffers for compute. */
- mask = rctx->cs_vertex_buffer_state.enabled_mask;
- while (mask) {
- uint32_t i = u_bit_scan(&mask);
- if (rctx->cs_vertex_buffer_state.vb[i].buffer == res) {
- rctx->b.flags |= R600_CONTEXT_INV_VERTEX_CACHE;
- }
- }
+ size >>= 2; /* convert to dwords */
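+ /* Split the copy into chunks of at most R600_DMA_COPY_MAX_SIZE_DW dwords. */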
+ ncopy = (size / R600_DMA_COPY_MAX_SIZE_DW) + !!(size % R600_DMA_COPY_MAX_SIZE_DW);
- /* Check constant buffers. */
- unsigned shader;
- for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
- struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
- uint32_t mask = state->enabled_mask;
-
- while (mask) {
- unsigned i = u_bit_scan(&mask);
- if (state->cb[i].buffer == res) {
- rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
-
- shader = PIPE_SHADER_TYPES; /* break the outer loop */
- break;
- }
- }
- }
-
- /* Check textures. */
- for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
- struct r600_samplerview_state *state = &rctx->samplers[shader].views;
- uint32_t mask = state->enabled_mask;
-
- while (mask) {
- uint32_t i = u_bit_scan(&mask);
- if (&state->views[i]->tex_resource->b.b == res) {
- rctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
-
- shader = PIPE_SHADER_TYPES; /* break the outer loop */
- break;
- }
- }
- }
-
- /* Check streamout buffers. */
- int i;
- for (i = 0; i < rctx->b.streamout.num_targets; i++) {
- if (rctx->b.streamout.targets[i]->b.buffer == res) {
- rctx->b.flags |= R600_CONTEXT_STREAMOUT_FLUSH |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_WAIT_3D_IDLE;
- break;
- }
- }
-
- /* Check colorbuffers. */
- for (i = 0; i < rctx->framebuffer.state.nr_cbufs; i++) {
- struct r600_texture *tex;
-
- if (rctx->framebuffer.state.cbufs[i] == NULL) {
- continue;
- }
-
- tex = (struct r600_texture*)rctx->framebuffer.state.cbufs[i]->texture;
-
- if (rctx->framebuffer.state.cbufs[i]->texture == res) {
- rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_WAIT_3D_IDLE;
-
- if (tex->cmask_size || tex->fmask_size) {
- rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META;
- }
- break;
- }
-
- if (tex && tex->cmask && tex->cmask != &tex->resource && &tex->cmask->b.b == res) {
- rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_CB_META |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_WAIT_3D_IDLE;
- }
- }
-
- /* Check a depth buffer. */
- if (rctx->framebuffer.state.zsbuf) {
- if (rctx->framebuffer.state.zsbuf->texture == res) {
- rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_WAIT_3D_IDLE;
- }
-
- struct r600_texture *tex =
- (struct r600_texture*)rctx->framebuffer.state.zsbuf->texture;
- if (tex && tex->htile && &tex->htile->b.b == res) {
- rctx->b.flags |= R600_CONTEXT_FLUSH_AND_INV_DB_META |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_WAIT_3D_IDLE;
- }
+ r600_need_dma_space(&rctx->b, ncopy * 5, rdst, rsrc);
+ for (i = 0; i < ncopy; i++) {
+ csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
+ /* emit reloc before writing cs so that cs is always in consistent state */
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
+ RADEON_PRIO_SDMA_BUFFER);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
+ RADEON_PRIO_SDMA_BUFFER);
+ radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize));
+ radeon_emit(cs, dst_offset & 0xfffffffc);
+ radeon_emit(cs, src_offset & 0xfffffffc);
+ radeon_emit(cs, (dst_offset >> 32UL) & 0xff);
+ radeon_emit(cs, (src_offset >> 32UL) & 0xff);
+ dst_offset += csize << 2;
+ src_offset += csize << 2;
+ size -= csize;
}
}