void si_need_gfx_cs_space(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
/* There is no need to flush the DMA IB here, because
 * r600_need_dma_space always flushes the GFX IB if there is
 * a conflict, which means any unflushed DMA commands automatically
 * precede the GFX IB (radeon/amdgpu drivers).
 */

/* There are two memory usage counters in the winsys for all buffers
 * that have been added (cs_add_buffer) and two counters in the pipe
 * driver for those that haven't been added yet. Flush if any of
 * the two upper limits is reached.
 */
- if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx_cs,
- ctx->b.vram, ctx->b.gtt))) {
- ctx->b.gtt = 0;
- ctx->b.vram = 0;
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ if (unlikely(!radeon_cs_memory_below_limit(ctx->screen, ctx->gfx_cs,
+ ctx->vram, ctx->gtt))) {
+ ctx->gtt = 0;
+ ctx->vram = 0;
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return;
}
- ctx->b.gtt = 0;
- ctx->b.vram = 0;
+ ctx->gtt = 0;
+ ctx->vram = 0;
/* If the IB is sufficiently large, don't count the space needed
* and just flush if there is not enough space left.
* Also reserve space for stopping queries at the end of IB, because
* the number of active queries is mostly unlimited.
*/
- unsigned need_dwords = 2048 + ctx->b.num_cs_dw_queries_suspend;
- if (!ctx->b.ws->cs_check_space(cs, need_dwords))
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ unsigned need_dwords = 2048 + ctx->num_cs_dw_queries_suspend;
+ if (!ctx->ws->cs_check_space(cs, need_dwords))
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
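/* Rough usage sketch (illustrative only, not part of this patch): draw and
 * compute paths call this before emitting packets so the IB never runs out
 * of space mid-packet, e.g.:
 *
 *     si_need_gfx_cs_space(sctx);
 *     radeon_emit(sctx->gfx_cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
 *     radeon_emit(sctx->gfx_cs, instance_count);
 *
 * If the space check triggers a flush, gfx_cs afterwards refers to a fresh,
 * mostly empty IB.
 */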
void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
struct pipe_fence_handle **fence)
{
- struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
- struct radeon_winsys *ws = ctx->b.ws;
+ struct radeon_cmdbuf *cs = ctx->gfx_cs;
+ struct radeon_winsys *ws = ctx->ws;
+ unsigned wait_flags = 0;
if (ctx->gfx_flush_in_progress)
return;
- if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
+ if (!ctx->screen->info.kernel_flushes_tc_l2_after_ib) {
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH |
+ SI_CONTEXT_INV_GLOBAL_L2;
+ } else if (ctx->chip_class == SI) {
+ /* The kernel flushes L2 before shaders are finished. */
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW)) {
+ wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+ }
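+ /* In short: old kernels don't flush TC L2 after IBs, so do it here and wait
+  * for shaders to finish; on SI the kernel's L2 flush can run before shaders
+  * are done, so still wait for them; on everything else a wait is only needed
+  * when the next gfx IB is not started immediately after this one. */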
+
+ /* Drop this flush if it's a no-op. */
+ if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
+ (!wait_flags || !ctx->gfx_last_ib_is_busy))
return;
if (si_check_device_reset(ctx))
return;

/* If the state tracker is flushing the GFX IB, si_flush_resource will
 * flush the DMA IB if there is outstanding DMA work.
 *
 * This code is only needed when the driver flushes the GFX IB
 * internally, and it never asks for a fence handle.
 */
- if (radeon_emitted(ctx->b.dma_cs, 0)) {
+ if (radeon_emitted(ctx->dma_cs, 0)) {
assert(fence == NULL); /* internal flushes only */
si_flush_dma_cs(ctx, flags, NULL);
}
ctx->gfx_flush_in_progress = true;
- if (!LIST_IS_EMPTY(&ctx->b.active_queries))
+ if (!LIST_IS_EMPTY(&ctx->active_queries))
si_suspend_queries(ctx);
ctx->streamout.suspended = false;
if (ctx->streamout.begin_emitted) {
si_emit_streamout_end(ctx);
ctx->streamout.suspended = true;
}
- ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
- SI_CONTEXT_PS_PARTIAL_FLUSH;
-
- /* DRM 3.1.0 doesn't flush TC for VI correctly. */
- if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
- ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_INV_VMEM_L1;
+ /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
+ * because the kernel doesn't wait for it. */
+ if (ctx->chip_class >= CIK)
+ si_cp_dma_wait_for_idle(ctx);
- si_emit_cache_flush(ctx);
+ /* Wait for draw calls to finish if needed. */
+ if (wait_flags) {
+ ctx->flags |= wait_flags;
+ si_emit_cache_flush(ctx);
+ }
+ ctx->gfx_last_ib_is_busy = wait_flags == 0;
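+ /* If no wait was emitted, this IB may still be executing when the next one
+  * starts; the no-op check at the top of this function consults this flag on
+  * the next flush. */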
if (ctx->current_saved_cs) {
si_trace_emit(ctx);
}
/* Flush the CS. */
- ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
+ ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
if (fence)
- ws->fence_reference(fence, ctx->b.last_gfx_fence);
+ ws->fence_reference(fence, ctx->last_gfx_fence);
/* This must be after cs_flush returns, since the context's API
* thread can concurrently read this value in si_fence_finish. */
- ctx->b.num_gfx_cs_flushes++;
+ ctx->num_gfx_cs_flushes++;
/* Check VM faults if needed. */
if (ctx->screen->debug_flags & DBG(CHECK_VM)) {
/* Use conservative timeout 800ms, after which we won't wait any
* longer and assume the GPU is hung.
*/
- ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
+ ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800*1000*1000);
si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, RING_GFX);
}
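/* Note: CHECK_VM is a debug-only path (the flag is typically enabled through
 * the R600_DEBUG/AMD_DEBUG environment variables), so this blocking wait does
 * not happen in normal operation. */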
pipe_reference_init(&ctx->current_saved_cs->reference, 1);
- ctx->current_saved_cs->trace_buf = (struct r600_resource*)
- pipe_buffer_create(ctx->b.b.screen, 0,
- PIPE_USAGE_STAGING, 8);
+ ctx->current_saved_cs->trace_buf = r600_resource(
+ pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 8));
if (!ctx->current_saved_cs->trace_buf) {
free(ctx->current_saved_cs);
ctx->current_saved_cs = NULL;
return;
}
- pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->current_saved_cs->trace_buf->b.b,
+ pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b,
0, sizeof(zeros), zeros);
ctx->current_saved_cs->trace_id = 0;
si_trace_emit(ctx);
- radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, ctx->current_saved_cs->trace_buf,
+ radeon_add_to_buffer_list(ctx, ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
}
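/* The 8-byte trace buffer receives the trace IDs that si_trace_emit writes as
 * the IB executes, so the hang/VM-fault dump code (in si_debug.c) can tell how
 * far a problematic IB actually got. */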
if (ctx->is_debug)
si_begin_gfx_cs_debug(ctx);
- /* Flush read caches at the beginning of CS not flushed by the kernel. */
- if (ctx->b.chip_class >= CIK)
- ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_ICACHE;
-
- ctx->b.flags |= SI_CONTEXT_START_PIPELINE_STATS;
+ /* Always invalidate caches at the beginning of IBs, because external
+ * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
+ * buffers.
+ *
+ * Note that the cache flush done by the kernel at the end of GFX IBs
+ * isn't useful here, because that flush can finish after the following
+ * IB starts drawing.
+ *
+ * TODO: Do we also need to invalidate CB & DB caches?
+ */
+ ctx->flags |= SI_CONTEXT_INV_ICACHE |
+ SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_VMEM_L1 |
+ SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_START_PIPELINE_STATS;
/* Set all valid groups as dirty so they get re-emitted on the
 * next draw command.
 */
si_pm4_reset_emitted(ctx);
}
/* This should always be marked as dirty to set the framebuffer scissor
* at least. */
- si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
- si_mark_atom_dirty(ctx, &ctx->clip_regs);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
/* CLEAR_STATE sets zeros. */
if (!has_clear_state || ctx->clip_state.any_nonzeros)
- si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
- ctx->msaa_sample_locs.nr_samples = 0;
- si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
- si_mark_atom_dirty(ctx, &ctx->msaa_config);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
+ ctx->sample_locs_num_samples = 0;
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
/* CLEAR_STATE sets 0xffff. */
- if (!has_clear_state || ctx->sample_mask.sample_mask != 0xffff)
- si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
- si_mark_atom_dirty(ctx, &ctx->cb_render_state);
+ if (!has_clear_state || ctx->sample_mask != 0xffff)
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
/* CLEAR_STATE sets zeros. */
if (!has_clear_state || ctx->blend_color.any_nonzeros)
- si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
- si_mark_atom_dirty(ctx, &ctx->db_render_state);
- if (ctx->b.chip_class >= GFX9)
- si_mark_atom_dirty(ctx, &ctx->dpbb_state);
- si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
- si_mark_atom_dirty(ctx, &ctx->spi_map);
- si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
- si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
+ if (ctx->chip_class >= GFX9)
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
si_all_descriptors_begin_new_cs(ctx);
si_all_resident_buffers_begin_new_cs(ctx);
ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
ctx->viewports.depth_range_dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
- si_mark_atom_dirty(ctx, &ctx->scratch_state);
+ si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
if (ctx->scratch_buffer) {
si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
}
if (ctx->streamout.suspended) {
ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
si_streamout_buffers_dirty(ctx);
}
- if (!LIST_IS_EMPTY(&ctx->b.active_queries))
+ if (!LIST_IS_EMPTY(&ctx->active_queries))
si_resume_queries(ctx);
- assert(!ctx->b.gfx_cs->prev_dw);
- ctx->b.initial_gfx_cs_size = ctx->b.gfx_cs->current.cdw;
+ assert(!ctx->gfx_cs->prev_dw);
+ ctx->initial_gfx_cs_size = ctx->gfx_cs->current.cdw;
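+ /* Anything at or below initial_gfx_cs_size is just this per-IB preamble;
+  * si_flush_gfx_cs compares against it in the no-op check above to detect an
+  * IB that contains no real work. */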
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
ctx->last_index_size = -1;
ctx->last_primitive_restart_en = -1;
ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
- ctx->last_gs_out_prim = -1;
ctx->last_prim = -1;
ctx->last_multi_vgt_param = -1;
ctx->last_rast_prim = -1;
ctx->last_tcs = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
+ ctx->last_ls_hs_config = -1; /* impossible value */
ctx->cs_shader_state.initialized = false;
+
+ /* Set all saved registers state to unknown */
+ ctx->tracked_regs.reg_saved = 0;
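+ /* A cleared bit means the register's last written value is unknown, so the
+  * optimized register writers will emit it unconditionally the first time it
+  * is set in the new IB. */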
}