void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
- boolean count_draw_in)
+ boolean count_draw_in, unsigned num_atomics)
{
/* Flush the DMA IB if it's not empty. */
if (radeon_emitted(ctx->b.dma.cs, 0))
- ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
- if (!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
+ if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
+ ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
/* all will be accounted once relocations are emitted */
num_dw += R600_MAX_FLUSH_CS_DWORDS + R600_MAX_DRAW_CS_DWORDS;
}
+ /* add atomic counters, 8 pre + 8 post per counter + 16 post if any counters */
+ num_dw += (num_atomics * 16) + (num_atomics ? 16 : 0);
+
/* Count in r600_suspend_queries. */
num_dw += ctx->b.num_cs_dw_queries_suspend;
/* Flush if there's not enough space. */
if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
}
void r600_flush_emit(struct r600_context *rctx)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
unsigned cp_coher_cntl = 0;
unsigned wait_until = 0;
if (!rctx->b.flags) {
return;
}
+ /* Ensure coherency between streamout and shaders. */
+ if (rctx->b.flags & R600_CONTEXT_STREAMOUT_FLUSH)
+ rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER);
+
if (rctx->b.flags & R600_CONTEXT_WAIT_3D_IDLE) {
wait_until |= S_008040_WAIT_3D_IDLE(1);
}
}
}
+ /* Wait packets must be executed first, because SURFACE_SYNC doesn't
+ * wait for shaders if it's not flushing CB or DB.
+ */
if (rctx->b.flags & R600_CONTEXT_PS_PARTIAL_FLUSH) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
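+ /* Wait for compute shaders to finish as well. */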
+ if (rctx->b.flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
+ if (wait_until) {
+ /* Use of WAIT_UNTIL is deprecated on Cayman+ */
+ if (rctx->b.family < CHIP_CAYMAN) {
+ /* wait for things to settle */
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
+ }
+ }
+
if (rctx->b.chip_class >= R700 &&
(rctx->b.flags & R600_CONTEXT_FLUSH_AND_INV_CB_META)) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
}
- if (wait_until) {
- /* Use of WAIT_UNTIL is deprecated on Cayman+ */
- if (rctx->b.family < CHIP_CAYMAN) {
- /* wait for things to settle */
- radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, wait_until);
- }
- }
-
/* everything is properly flushed */
rctx->b.flags = 0;
}
struct pipe_fence_handle **fence)
{
struct r600_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = ctx->b.gfx.cs;
+ struct radeon_winsys *ws = ctx->b.ws;
+
- if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) && !fence)
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
+ return;
+
+ if (r600_check_device_reset(&ctx->b))
return;
r600_preflush_suspend_features(&ctx->b);
r600_flush_emit(ctx);
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
if (ctx->b.chip_class == R600) {
radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
}
- /* force to keep tiling flags */
- flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
-
+ if (ctx->is_debug) {
+ /* Save the IB for debug contexts. */
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx, true);
+ r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
+ r600_resource_reference(&ctx->trace_buf, NULL);
+ }
/* Flush the CS. */
- ctx->b.ws->cs_flush(cs, flags, fence);
-
+ ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
+ if (fence)
+ ws->fence_reference(fence, ctx->b.last_gfx_fence);
+ ctx->b.num_gfx_cs_flushes++;
+
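+ /* In debug contexts, wait for the IB to finish; if it times out, assume a GPU hang and dump the debug state. */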
+ if (ctx->is_debug) {
+ if (!ws->fence_wait(ws, ctx->b.last_gfx_fence, 10000000)) {
+ const char *fname = getenv("R600_TRACE");
+ if (!fname)
+ exit(-1);
+ FILE *fl = fopen(fname, "w+");
+ if (fl) {
+ eg_dump_debug_state(&ctx->b.b, fl, 0);
+ fclose(fl);
+ } else
+ perror(fname);
+ exit(-1);
+ }
+ }
r600_begin_new_cs(ctx);
}
{
unsigned shader;
+ if (ctx->is_debug) {
+ uint32_t zero = 0;
+
+ /* Create a buffer used for writing trace IDs and initialize it to 0. */
+ assert(!ctx->trace_buf);
+ ctx->trace_buf = (struct r600_resource*)
+ pipe_buffer_create(ctx->b.b.screen, 0,
+ PIPE_USAGE_STAGING, 4);
+ if (ctx->trace_buf)
+ pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
+ 0, sizeof(zero), &zero);
+ ctx->trace_id = 0;
+ }
+
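+ /* Emit the first trace point of the new CS. */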
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
+
ctx->b.flags = 0;
ctx->b.gtt = 0;
ctx->b.vram = 0;
r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
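+ /* Shader image and buffer atoms exist only on Evergreen and later. */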
+ if (ctx->b.chip_class >= EVERGREEN) {
+ r600_mark_atom_dirty(ctx, &ctx->fragment_images.atom);
+ r600_mark_atom_dirty(ctx, &ctx->fragment_buffers.atom);
+ r600_mark_atom_dirty(ctx, &ctx->compute_images.atom);
+ r600_mark_atom_dirty(ctx, &ctx->compute_buffers.atom);
+ }
r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
r600_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
r600_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
if (ctx->b.chip_class <= EVERGREEN) {
r600_mark_atom_dirty(ctx, &ctx->config_state.atom);
r600_sampler_states_dirty(ctx, &samplers->states);
}
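+ /* Mark all scratch buffer state dirty so it is re-emitted in the new CS. */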
+ for (shader = 0; shader < ARRAY_SIZE(ctx->scratch_buffers); shader++) {
+ ctx->scratch_buffers[shader].dirty = true;
+ }
+
r600_postflush_resume_features(&ctx->b);
/* Re-emit the draw state. */
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;
+ ctx->last_rast_prim = -1;
+ ctx->current_rast_prim = -1;
assert(!ctx->b.gfx.cs->prev_dw);
ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
}
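+/* Make the PFP (command prefetch parser) wait until the ME has finished processing all preceding packets. */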
+void r600_emit_pfp_sync_me(struct r600_context *rctx)
+{
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
+
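+ /* The PFP_SYNC_ME packet requires Evergreen+ and a new enough radeon DRM (minor >= 46); otherwise emulate it below. */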
+ if (rctx->b.chip_class >= EVERGREEN &&
+ rctx->b.screen->info.drm_minor >= 46) {
+ radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
+ radeon_emit(cs, 0);
+ } else {
+ /* Emulate PFP_SYNC_ME by writing a value to memory in ME and
+ * waiting for it in PFP.
+ */
+ struct r600_resource *buf = NULL;
+ unsigned offset, reloc;
+ uint64_t va;
+
+ /* 16-byte address alignment is required by WAIT_REG_MEM. */
+ u_suballocator_alloc(rctx->b.allocator_zeroed_memory, 4, 16,
+ &offset, (struct pipe_resource**)&buf);
+ if (!buf) {
+ /* This is too heavyweight, but will work. */
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ return;
+ }
+
+ reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, buf,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_FENCE);
+
+ va = buf->gpu_address + offset;
+ assert(va % 16 == 0);
+
+ /* Write 1 to memory in ME. */
+ radeon_emit(cs, PKT3(PKT3_MEM_WRITE, 3, 0));
+ radeon_emit(cs, va);
+ radeon_emit(cs, ((va >> 32) & 0xff) | MEM_WRITE_32_BITS);
+ radeon_emit(cs, 1);
+ radeon_emit(cs, 0);
+
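+ /* The NOP packet carries the relocation for the scratch buffer. */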
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ /* Wait in PFP (PFP can only do GEQUAL against memory). */
+ radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
+ radeon_emit(cs, WAIT_REG_MEM_GEQUAL |
+ WAIT_REG_MEM_MEMORY |
+ WAIT_REG_MEM_PFP);
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, 1); /* reference value */
+ radeon_emit(cs, 0xffffffff); /* mask */
+ radeon_emit(cs, 4); /* poll interval */
+
+ radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
+ radeon_emit(cs, reloc);
+
+ r600_resource_reference(&buf, NULL);
+ }
+}
+
/* The max number of bytes to copy per packet. */
#define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
struct pipe_resource *src, uint64_t src_offset,
unsigned size)
{
- struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
+ struct radeon_cmdbuf *cs = rctx->b.gfx.cs;
assert(size);
assert(rctx->screen->b.has_cp_dma);
src_offset += r600_resource(src)->gpu_address;
/* Flush the caches where the resources are bound. */
- rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
- R600_CONTEXT_INV_VERTEX_CACHE |
- R600_CONTEXT_INV_TEX_CACHE |
- R600_CONTEXT_FLUSH_AND_INV |
- R600_CONTEXT_FLUSH_AND_INV_CB |
- R600_CONTEXT_FLUSH_AND_INV_DB |
- R600_CONTEXT_FLUSH_AND_INV_CB_META |
- R600_CONTEXT_FLUSH_AND_INV_DB_META |
- R600_CONTEXT_STREAMOUT_FLUSH |
+ rctx->b.flags |= r600_get_flush_flags(R600_COHERENCY_SHADER) |
R600_CONTEXT_WAIT_3D_IDLE;
/* There are differences between R700 and EG in CP DMA,
* but we only use the common ones here. */
unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
unsigned src_reloc, dst_reloc;
- r600_need_cs_space(rctx, 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0), FALSE);
+ r600_need_cs_space(rctx,
+ 10 + (rctx->b.flags ? R600_MAX_FLUSH_CS_DWORDS : 0) +
+ 3 + R600_MAX_PFP_SYNC_ME_DWORDS, FALSE, 0);
/* Flush the caches for the first copy only. */
if (rctx->b.flags) {
dst_offset += byte_count;
}
- /* Invalidate the read caches. */
- rctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE |
- R600_CONTEXT_INV_VERTEX_CACHE |
- R600_CONTEXT_INV_TEX_CACHE;
+ /* CP_DMA_CP_SYNC doesn't wait for idle on R6xx, but this does. */
+ if (rctx->b.chip_class == R600)
+ radeon_set_config_reg(cs, R_008040_WAIT_UNTIL,
+ S_008040_WAIT_CP_DMA_IDLE(1));
+
+ /* CP DMA is executed in ME, but index buffers are read by PFP.
+ * This ensures that ME (CP DMA) is idle before PFP starts fetching
+ * indices. If we wanted to execute CP DMA in PFP, this packet
+ * should precede it.
+ */
+ r600_emit_pfp_sync_me(rctx);
}
void r600_dma_copy_buffer(struct r600_context *rctx,
uint64_t src_offset,
uint64_t size)
{
- struct radeon_winsys_cs *cs = rctx->b.dma.cs;
+ struct radeon_cmdbuf *cs = rctx->b.dma.cs;
unsigned i, ncopy, csize;
struct r600_resource *rdst = (struct r600_resource*)dst;
struct r600_resource *rsrc = (struct r600_resource*)src;
for (i = 0; i < ncopy; i++) {
csize = size < R600_DMA_COPY_MAX_SIZE_DW ? size : R600_DMA_COPY_MAX_SIZE_DW;
/* emit reloc before writing cs so that cs is always in consistent state */
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ,
- RADEON_PRIO_SDMA_BUFFER);
- radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE,
- RADEON_PRIO_SDMA_BUFFER);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rsrc, RADEON_USAGE_READ, 0);
+ radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, rdst, RADEON_USAGE_WRITE, 0);
radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 0, 0, csize));
radeon_emit(cs, dst_offset & 0xfffffffc);
radeon_emit(cs, src_offset & 0xfffffffc);
src_offset += csize << 2;
size -= csize;
}
- r600_dma_emit_wait_idle(&rctx->b);
}