#include "si_pipe.h"
+static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
+{
+ /* Ensure we have enough space to start a new range in a hole */
+ assert(element_size >= 3);
+
+ /* 5 dwords for a possible load to reinitialize CE RAM when there is no
+ * preamble IB, 5 dwords for the write to L2, and 3 dwords for every
+ * range written to CE RAM.
+ */
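+ /* E.g. a hypothetical 16-entry list with element_size = 4 would reserve
+ * 5 + 5 + 3 + 16 * 4 = 77 dwords. */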
+ return 5 + 5 + 3 + count * element_size;
+}
+
+static unsigned si_ce_needed_cs_space(void)
+{
+ unsigned space = 0;
+
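+ /* Each shader stage has its own descriptor lists, so the per-stage sum
+ * below is multiplied by SI_NUM_SHADERS. */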
+ space += si_descriptor_list_cs_space(SI_NUM_CONST_BUFFERS, 4);
+ space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS, 4);
+ space += si_descriptor_list_cs_space(SI_NUM_SAMPLERS, 16);
+ space += si_descriptor_list_cs_space(SI_NUM_IMAGES, 8);
+ space *= SI_NUM_SHADERS;
+
+ space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);
+
+ /* 2 dwords for the packet that increments the CE counter */
+ space += 2;
+
+ return space;
+}
+
/* initialize */
-void si_need_cs_space(struct si_context *ctx, unsigned num_dw,
- boolean count_draw_in)
+void si_need_cs_space(struct si_context *ctx)
{
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
+ struct radeon_winsys_cs *dma = ctx->b.dma.cs;
+
+ /* Flush the DMA IB if it's not empty. */
+ if (radeon_emitted(dma, 0))
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
/* There are two memory usage counters in the winsys for all buffers
- * that have been added (cs_add_reloc) and two counters in the pipe
+ * that have been added (cs_add_buffer) and two counters in the pipe
* driver for those that haven't been added yet.
*/
- if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.rings.gfx.cs,
+ if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs,
ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return;
}
ctx->b.gtt = 0;
ctx->b.vram = 0;
/* If the CS is sufficiently large, don't count the space needed
* and just flush if there is not enough space left.
*/
- if (unlikely(cs->cdw > cs->max_dw - 2048))
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ if (!ctx->b.ws->cs_check_space(cs, 2048) ||
+ (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
void si_context_gfx_flush(void *context, unsigned flags,
struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
struct radeon_winsys *ws = ctx->b.ws;
- if (cs->cdw == ctx->b.initial_gfx_cs_size &&
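+ /* Don't re-enter the flush if one is already in progress, e.g. if
+ * flushing is triggered again while this flush is being processed. */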
+ if (ctx->gfx_flush_in_progress)
+ return;
+
+ ctx->gfx_flush_in_progress = true;
+
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
(!fence || ctx->last_gfx_fence)) {
if (fence)
ws->fence_reference(fence, ctx->last_gfx_fence);
if (!(flags & RADEON_FLUSH_ASYNC))
ws->cs_sync_flush(cs);
+ ctx->gfx_flush_in_progress = false;
return;
}
- ctx->b.rings.gfx.flushing = true;
-
r600_preflush_suspend_features(&ctx->b);
- ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
- SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- /* this is probably not needed anymore */
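+ /* Wait for compute and pixel shader work to finish before ending the IB. */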
+ ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
+
+ /* DRM 3.1.0 doesn't flush TC for VI correctly. */
+ if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
+ ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_INV_VMEM_L1;
+
si_emit_cache_flush(ctx, NULL);
/* force to keep tiling flags */
flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
if (ctx->trace_buf)
si_trace_emit(ctx);
- /* Save the IB for debug contexts. */
if (ctx->is_debug) {
- free(ctx->last_ib);
- ctx->last_ib_dw_size = cs->cdw;
- ctx->last_ib = malloc(cs->cdw * 4);
- memcpy(ctx->last_ib, cs->buf, cs->cdw * 4);
+ /* Save the IB for debug contexts. */
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx);
r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
r600_resource_reference(&ctx->trace_buf, NULL);
}
/* Flush the CS. */
- ws->cs_flush(cs, flags, &ctx->last_gfx_fence,
- ctx->screen->b.cs_count++);
- ctx->b.rings.gfx.flushing = false;
+ ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
if (fence)
ws->fence_reference(fence, ctx->last_gfx_fence);
+ /* Check VM faults if needed. */
+ if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
+ /* Use a conservative timeout of 800 ms, after which we won't wait any
+ * longer and will assume the GPU is hung.
+ */
+ ctx->b.ws->fence_wait(ctx->b.ws, ctx->last_gfx_fence, 800*1000*1000);
+
+ si_check_vm_faults(ctx);
+ }
+
si_begin_new_cs(ctx);
+ ctx->gfx_flush_in_progress = false;
}
void si_begin_new_cs(struct si_context *ctx)
if (ctx->trace_buf)
si_trace_emit(ctx);
- /* Flush read caches at the beginning of CS. */
- ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
- SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- SI_CONTEXT_INV_KCACHE |
- SI_CONTEXT_INV_ICACHE;
+ /* At the start of a new CS, flush the read caches that the kernel doesn't flush. */
+ if (ctx->b.chip_class >= CIK)
+ ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_ICACHE;
+
+ ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
/* Set all valid state groups as dirty so they get re-emitted on
 * the next draw command.
 */
/* The CS initialization should be emitted before everything else. */
si_pm4_emit(ctx, ctx->init_config);
+ if (ctx->init_config_gs_rings)
+ si_pm4_emit(ctx, ctx->init_config_gs_rings);
+
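+ /* Enable CE RAM loads in the preamble IB if there is one, otherwise in
+ * the main CE IB; with a preamble, also reupload all descriptor lists. */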
+ if (ctx->ce_preamble_ib)
+ si_ce_enable_loads(ctx->ce_preamble_ib);
+ else if (ctx->ce_ib)
+ si_ce_enable_loads(ctx->ce_ib);
+
+ if (ctx->ce_preamble_ib)
+ si_ce_reinitialize_all_descriptors(ctx);
ctx->framebuffer.dirty_cbufs = (1 << 8) - 1;
ctx->framebuffer.dirty_zsbuf = true;
si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs);
si_mark_atom_dirty(ctx, &ctx->msaa_config);
si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
- si_mark_atom_dirty(ctx, &ctx->cb_target_mask);
+ si_mark_atom_dirty(ctx, &ctx->cb_render_state);
si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
si_mark_atom_dirty(ctx, &ctx->db_render_state);
si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
si_mark_atom_dirty(ctx, &ctx->spi_map);
si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+ si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
- ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
r600_postflush_resume_features(&ctx->b);
- ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
+ assert(!ctx->b.gfx.cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
ctx->last_tcs = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
+
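+ /* Force the compute shader state to be re-emitted in the new CS. */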
+ ctx->cs_shader_state.initialized = false;
}