#include "si_pipe.h"
-/* initialize */
-void si_need_cs_space(struct si_context *ctx, unsigned num_dw,
- boolean count_draw_in)
+static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
{
- int i;
+ /* Ensure we have enough space to start a new range in a hole */
+ assert(element_size >= 3);
- /* The number of dwords we already used in the CS so far. */
- num_dw += ctx->b.rings.gfx.cs->cdw;
+ /* 5 dwords for a possible load to reinitialize when we have no
+ * preamble IB + 5 dwords for the write to L2 + 3 dwords for every
+ * range written to CE RAM.
+ */
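+ /* Worked example (hypothetical values): a list of 16 elements of
+ * 4 dwords each needs 5 + 5 + 3 + 16 * 4 = 77 dwords. */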
+ return 5 + 5 + 3 + count * element_size;
+}
- if (count_draw_in) {
- for (i = 0; i < SI_NUM_ATOMS(ctx); i++) {
- if (ctx->atoms.array[i]->dirty) {
- num_dw += ctx->atoms.array[i]->num_dw;
- }
- }
+static unsigned si_ce_needed_cs_space(void)
+{
+ unsigned space = 0;
- /* The number of dwords all the dirty states would take. */
- num_dw += si_pm4_dirty_dw(ctx);
+ space += si_descriptor_list_cs_space(SI_NUM_CONST_BUFFERS, 4);
+ space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS, 4);
+ space += si_descriptor_list_cs_space(SI_NUM_SAMPLERS, 16);
+ space += si_descriptor_list_cs_space(SI_NUM_IMAGES, 8);
+ space *= SI_NUM_SHADERS;
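+ /* The lists above exist once per shader stage, hence the multiply;
+ * the RW buffer list below is counted only once. */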
- /* The upper-bound of how much a draw command would take. */
- num_dw += SI_MAX_DRAW_CS_DWORDS;
- }
+ space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);
- /* Count in queries_suspend. */
- num_dw += ctx->b.num_cs_dw_nontimer_queries_suspend;
+ /* Increment-CE-counter packet (2 dwords) */
+ space += 2;
- /* Count in streamout_end at the end of CS. */
- if (ctx->b.streamout.begin_emitted) {
- num_dw += ctx->b.streamout.num_dw_for_end;
- }
+ return space;
+}
- /* Count in render_condition(NULL) at the end of CS. */
- if (ctx->b.predicate_drawing) {
- num_dw += 3;
- }
+/* Make sure enough CS space is available, flushing if necessary. */
+void si_need_cs_space(struct si_context *ctx)
+{
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
+ struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
+ struct radeon_winsys_cs *dma = ctx->b.dma.cs;
- /* Count in framebuffer cache flushes at the end of CS. */
- num_dw += ctx->atoms.s.cache_flush->num_dw;
+ /* Flush the DMA IB if it's not empty. */
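+ /* (Assumed rationale: submitting pending SDMA work here keeps it
+ * ordered ahead of the gfx work that may consume its results.) */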
+ if (radeon_emitted(dma, 0))
+ ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
-#if SI_TRACE_CS
- if (ctx->screen->b.trace_bo) {
- num_dw += SI_TRACE_CS_DWORDS;
+ /* There are two memory usage counters in the winsys for all buffers
+ * that have been added (cs_add_buffer) and two counters in the pipe
+ * driver for those that haven't been added yet.
+ */
+ if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs,
+ ctx->b.vram, ctx->b.gtt))) {
+ ctx->b.gtt = 0;
+ ctx->b.vram = 0;
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ return;
}
-#endif
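+ /* Restart accounting for buffers that haven't been added to the CS
+ * yet (assumed: that is what these two counters track, per the
+ * comment above). */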
+ ctx->b.gtt = 0;
+ ctx->b.vram = 0;
- /* Flush if there's not enough space. */
- if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
- ctx->b.rings.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
- }
+ /* If the CS is sufficiently large, don't count the space needed
+ * and just flush if there is not enough space left.
+ */
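+ /* 2048 dwords of headroom is assumed here to be a safe upper bound
+ * for one draw call plus all dirty-state emission. */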
+ if (!ctx->b.ws->cs_check_space(cs, 2048) ||
+ (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
+ ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
void si_context_gfx_flush(void *context, unsigned flags,
struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
- struct radeon_winsys_cs *cs = ctx->b.rings.gfx.cs;
+ struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
struct radeon_winsys *ws = ctx->b.ws;
- if (cs->cdw == ctx->b.initial_gfx_cs_size &&
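+ /* Guard against reentry: code below (e.g. tracing, VM fault checks,
+ * buffer mapping) could otherwise trigger a recursive flush
+ * (assumed rationale). */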
+ if (ctx->gfx_flush_in_progress)
+ return;
+
+ ctx->gfx_flush_in_progress = true;
+
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
(!fence || ctx->last_gfx_fence)) {
if (fence)
ws->fence_reference(fence, ctx->last_gfx_fence);
if (!(flags & RADEON_FLUSH_ASYNC))
ws->cs_sync_flush(cs);
+ ctx->gfx_flush_in_progress = false;
return;
}
- ctx->b.rings.gfx.flushing = true;
-
r600_preflush_suspend_features(&ctx->b);
- ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
- SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- /* this is probably not needed anymore */
+ ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(&ctx->b, NULL);
+
+ /* DRM 3.1.0 doesn't flush TC for VI correctly. */
+ if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
+ ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_INV_VMEM_L1;
+
+ si_emit_cache_flush(ctx, NULL);
/* force to keep tiling flags */
flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
+ if (ctx->trace_buf)
+ si_trace_emit(ctx);
+
+ if (ctx->is_debug) {
+ /* Save the IB for debug contexts. */
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx);
+ r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
+ r600_resource_reference(&ctx->trace_buf, NULL);
+ }
+
/* Flush the CS. */
- ws->cs_flush(cs, flags, &ctx->last_gfx_fence,
- ctx->screen->b.cs_count++);
- ctx->b.rings.gfx.flushing = false;
+ ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
if (fence)
ws->fence_reference(fence, ctx->last_gfx_fence);
-#if SI_TRACE_CS
- if (ctx->screen->b.trace_bo) {
- struct si_screen *sscreen = ctx->screen;
- unsigned i;
-
- for (i = 0; i < 10; i++) {
- usleep(5);
- if (!ws->buffer_is_busy(sscreen->b.trace_bo->buf, RADEON_USAGE_READWRITE)) {
- break;
- }
- }
- if (i == 10) {
- fprintf(stderr, "timeout on cs lockup likely happen at cs %d dw %d\n",
- sscreen->b.trace_ptr[1], sscreen->b.trace_ptr[0]);
- } else {
- fprintf(stderr, "cs %d executed in %dms\n", sscreen->b.trace_ptr[1], i * 5);
- }
+ /* Check VM faults if needed. */
+ if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
+ /* Use a conservative timeout of 800 ms, after which we stop
+ * waiting and assume the GPU is hung.
+ */
+ ctx->b.ws->fence_wait(ctx->b.ws, ctx->last_gfx_fence, 800*1000*1000);
+
+ si_check_vm_faults(ctx);
}
-#endif
si_begin_new_cs(ctx);
+ ctx->gfx_flush_in_progress = false;
}
void si_begin_new_cs(struct si_context *ctx)
{
- /* Flush read caches at the beginning of CS. */
- ctx->b.flags |= SI_CONTEXT_INV_TC_L1 |
- SI_CONTEXT_INV_TC_L2 |
- SI_CONTEXT_INV_KCACHE |
- SI_CONTEXT_INV_ICACHE;
+ if (ctx->is_debug) {
+ uint32_t zero = 0;
+
+ /* Create a buffer used for writing trace IDs and initialize it to 0. */
+ assert(!ctx->trace_buf);
+ ctx->trace_buf = (struct r600_resource*)
+ pipe_buffer_create(ctx->b.b.screen, PIPE_BIND_CUSTOM,
+ PIPE_USAGE_STAGING, 4);
+ if (ctx->trace_buf)
+ pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
+ 0, sizeof(zero), &zero);
+ ctx->trace_id = 0;
+ }
+
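+ /* Emit an initial trace point so that a hang before the first draw
+ * call is still attributable (assumed intent). */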
+ if (ctx->trace_buf)
+ si_trace_emit(ctx);
+
+ /* At the beginning of the CS, flush the read caches that the kernel doesn't flush for us. */
+ if (ctx->b.chip_class >= CIK)
+ ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_ICACHE;
+
+ ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
/* set all valid groups as dirty so they get re-emitted on
 * the next draw command
 */
si_pm4_reset_emitted(ctx);

/* The CS initialization should be emitted before everything else. */
si_pm4_emit(ctx, ctx->init_config);
-
- ctx->clip_regs.dirty = true;
- ctx->framebuffer.atom.dirty = true;
- ctx->msaa_sample_locs.dirty = true;
- ctx->msaa_config.dirty = true;
- ctx->db_render_state.dirty = true;
- ctx->b.streamout.enable_atom.dirty = true;
+ if (ctx->init_config_gs_rings)
+ si_pm4_emit(ctx, ctx->init_config_gs_rings);
+
+ if (ctx->ce_preamble_ib)
+ si_ce_enable_loads(ctx->ce_preamble_ib);
+ else if (ctx->ce_ib)
+ si_ce_enable_loads(ctx->ce_ib);
+
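+ /* When a preamble IB is used, CE RAM contents are not assumed to
+ * survive across submissions, so reload all descriptor lists from
+ * memory (assumed rationale). */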
+ if (ctx->ce_preamble_ib)
+ si_ce_reinitialize_all_descriptors(ctx);
+
+ ctx->framebuffer.dirty_cbufs = (1 << 8) - 1; /* all 8 color buffers */
+ ctx->framebuffer.dirty_zsbuf = true;
+ si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
+
+ si_mark_atom_dirty(ctx, &ctx->clip_regs);
+ si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
+ si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs);
+ si_mark_atom_dirty(ctx, &ctx->msaa_config);
+ si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
+ si_mark_atom_dirty(ctx, &ctx->cb_render_state);
+ si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
+ si_mark_atom_dirty(ctx, &ctx->db_render_state);
+ si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
+ si_mark_atom_dirty(ctx, &ctx->spi_map);
+ si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
+ si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
+ ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
+
r600_postflush_resume_features(&ctx->b);
- ctx->b.initial_gfx_cs_size = ctx->b.rings.gfx.cs->cdw;
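+ /* A fresh CS has no previous chained chunks, so current.cdw alone is
+ * the total size (assumption: prev_dw counts dwords in prior chunks). */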
+ assert(!ctx->b.gfx.cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
ctx->last_tcs = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
+
+ ctx->cs_shader_state.initialized = false;
}