*/
#include "si_pipe.h"
+#include "radeon/r600_cs.h"
+
+static unsigned si_descriptor_list_cs_space(unsigned count, unsigned element_size)
+{
+ /* Ensure we have enough space to start a new range in a hole */
+ assert(element_size >= 3);
+
+ /* 5 dwords for the write to L2 + 3 dwords for the packet header of
+ * every disjoint range written to CE RAM.
+ */
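+ /* A sketch of the worst case behind the formula below (this reasoning
+ * is inferred, not stated in the original change): dirty slots
+ * alternating with clean ones split the list into count / 2 disjoint
+ * ranges, hence 3 * count / 2 header dwords on top of the
+ * count * element_size dwords of descriptor data.
+ */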
+ return 5 + (3 * count / 2) + count * element_size;
+}
+
+static unsigned si_ce_needed_cs_space(void)
+{
+ unsigned space = 0;
+
+ space += si_descriptor_list_cs_space(SI_NUM_SHADER_BUFFERS +
+ SI_NUM_CONST_BUFFERS, 4);
+ /* two 8-byte images share one 16-byte slot */
+ space += si_descriptor_list_cs_space(SI_NUM_IMAGES / 2 +
+ SI_NUM_SAMPLERS, 16);
+ space *= SI_NUM_SHADERS;
+
+ space += si_descriptor_list_cs_space(SI_NUM_RW_BUFFERS, 4);
+
+ /* Increment CE counter packet */
+ space += 2;
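+ /* (2 dwords = PKT3 header + 1 payload dword, presumably.) */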
+
+ return space;
+}
/* Make sure the gfx and CE IBs have enough space left, flushing if not. */
void si_need_cs_space(struct si_context *ctx)
{
struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
- struct radeon_winsys_cs *dma = ctx->b.dma.cs;
+ struct radeon_winsys_cs *ce_ib = ctx->ce_ib;
- /* Flush the DMA IB if it's not empty. */
- if (dma && dma->cdw)
- ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ /* There is no need to flush the DMA IB here, because
+ * r600_need_dma_space always flushes the GFX IB if there is
+ * a conflict, which means any unflushed DMA commands automatically
+ * precede the GFX IB (= they had no dependency on the GFX IB when
+ * they were submitted).
+ */
/* There are two memory usage counters (one for VRAM, one for GTT) in
* the winsys for all buffers that have been added (cs_add_buffer), and
* two counters in the pipe driver for those that haven't been added yet.
*/
- if (unlikely(!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs,
- ctx->b.vram, ctx->b.gtt))) {
+ if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
+ ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
/* If the CS is sufficiently large, don't count the space needed
* and just flush if there is not enough space left.
*/
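+ /* (The 2048-dword headroom is a heuristic; presumably enough for the
+ * worst-case state emission of a single draw.)
+ */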
- if (unlikely(cs->cdw > cs->max_dw - 2048))
+ if (!ctx->b.ws->cs_check_space(cs, 2048) ||
+ (ce_ib && !ctx->b.ws->cs_check_space(ce_ib, si_ce_needed_cs_space())))
ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
}
if (ctx->gfx_flush_in_progress)
return;
- ctx->gfx_flush_in_progress = true;
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
+ return;
- if (cs->cdw == ctx->b.initial_gfx_cs_size &&
- (!fence || ctx->last_gfx_fence)) {
- if (fence)
- ws->fence_reference(fence, ctx->last_gfx_fence);
- if (!(flags & RADEON_FLUSH_ASYNC))
- ws->cs_sync_flush(cs);
- ctx->gfx_flush_in_progress = false;
+ if (r600_check_device_reset(&ctx->b))
return;
+
+ if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
+ flags &= ~RADEON_FLUSH_ASYNC;
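+ /* (Presumably: flush synchronously so that the DBG_CHECK_VM handling
+ * further down can inspect this exact submission right away.)
+ */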
+
+ /* If the state tracker is flushing the GFX IB, r600_flush_from_st is
+ * responsible for flushing the DMA IB and merging the fences from both.
+ * This code is only needed when the driver flushes the GFX IB
+ * internally, and it never asks for a fence handle.
+ */
+ if (radeon_emitted(ctx->b.dma.cs, 0)) {
+ assert(fence == NULL); /* internal flushes only */
+ ctx->b.dma.flush(ctx, flags, NULL);
}
+ ctx->gfx_flush_in_progress = true;
+
+ /* This CE dump should be done in parallel with the last draw. */
+ if (ctx->ce_ib)
+ si_ce_save_all_descriptors_at_ib_end(ctx);
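+ /* (This presumably pairs with si_ce_restore_all_descriptors_at_ib_start()
+ * in si_begin_new_cs: CE RAM contents don't persist across IBs, so they
+ * are dumped to memory here and reloaded at the start of the next IB.)
+ */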
+
r600_preflush_suspend_features(&ctx->b);
- ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
- /* this is probably not needed anymore */
+ ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
- si_emit_cache_flush(ctx, NULL);
- /* force to keep tiling flags */
- flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;
+ /* DRM 3.1.0 doesn't flush TC for VI correctly. */
+ if (ctx->b.chip_class == VI && ctx->b.screen->info.drm_minor <= 1)
+ ctx->b.flags |= SI_CONTEXT_INV_GLOBAL_L2 |
+ SI_CONTEXT_INV_VMEM_L1;
+
+ si_emit_cache_flush(ctx);
if (ctx->trace_buf)
si_trace_emit(ctx);
if (ctx->is_debug) {
- unsigned i;
-
/* Save the IB for debug contexts. */
- free(ctx->last_ib);
- ctx->last_ib_dw_size = cs->cdw;
- ctx->last_ib = malloc(cs->cdw * 4);
- memcpy(ctx->last_ib, cs->buf, cs->cdw * 4);
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx);
r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
r600_resource_reference(&ctx->trace_buf, NULL);
-
- /* Save the buffer list. */
- if (ctx->last_bo_list) {
- for (i = 0; i < ctx->last_bo_count; i++)
- pb_reference(&ctx->last_bo_list[i].buf, NULL);
- free(ctx->last_bo_list);
- }
- ctx->last_bo_count = ws->cs_get_buffer_list(cs, NULL);
- ctx->last_bo_list = calloc(ctx->last_bo_count,
- sizeof(ctx->last_bo_list[0]));
- ws->cs_get_buffer_list(cs, ctx->last_bo_list);
}
/* Flush the CS. */
- ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
-
+ ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
if (fence)
- ws->fence_reference(fence, ctx->last_gfx_fence);
+ ws->fence_reference(fence, ctx->b.last_gfx_fence);
+ ctx->b.num_gfx_cs_flushes++;
/* Check VM faults if needed. */
- if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
- si_check_vm_faults(ctx);
+ if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
+ /* Use a conservative timeout of 800 ms, after which we won't wait any
+ * longer and assume the GPU is hung.
+ */
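+ /* (The timeout is in nanoseconds: 800 * 1000 * 1000 ns = 800 ms.) */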
+ ctx->b.ws->fence_wait(ctx->b.ws, ctx->b.last_gfx_fence, 800*1000*1000);
+
+ si_check_vm_faults(&ctx->b, &ctx->last_gfx, RING_GFX);
+ }
si_begin_new_cs(ctx);
ctx->gfx_flush_in_progress = false;
/* Create a buffer used for writing trace IDs and initialize it to 0. */
assert(!ctx->trace_buf);
ctx->trace_buf = (struct r600_resource*)
- pipe_buffer_create(ctx->b.b.screen, PIPE_BIND_CUSTOM,
+ pipe_buffer_create(ctx->b.b.screen, 0,
PIPE_USAGE_STAGING, 4);
if (ctx->trace_buf)
pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
0, sizeof(zero), &zero); /* 'zero' (uint32_t, == 0) is declared in elided context */
if (ctx->trace_buf)
si_trace_emit(ctx);
- /* Flush read caches at the beginning of CS. */
- ctx->b.flags |= SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER |
- SI_CONTEXT_INV_VMEM_L1 |
- SI_CONTEXT_INV_GLOBAL_L2 |
- SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_ICACHE;
+ /* Flush the read caches at the beginning of the CS; the kernel doesn't
+ * flush these for us. */
+ if (ctx->b.chip_class >= CIK)
+ ctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
+ SI_CONTEXT_INV_ICACHE;
+
+ ctx->b.flags |= R600_CONTEXT_START_PIPELINE_STATS;
/* Mark all valid groups as dirty so they get re-emitted on the
* next draw command.
*/
si_pm4_reset_emitted(ctx);

/* The CS initialization should be emitted before everything else. */
si_pm4_emit(ctx, ctx->init_config);
if (ctx->init_config_gs_rings)
si_pm4_emit(ctx, ctx->init_config_gs_rings);
- ctx->framebuffer.dirty_cbufs = (1 << 8) - 1;
- ctx->framebuffer.dirty_zsbuf = true;
+ if (ctx->ce_preamble_ib)
+ si_ce_enable_loads(ctx->ce_preamble_ib);
+ else if (ctx->ce_ib)
+ si_ce_enable_loads(ctx->ce_ib);
+
+ if (ctx->ce_ib)
+ si_ce_restore_all_descriptors_at_ib_start(ctx);
+
+ if (ctx->b.chip_class >= CIK)
+ si_mark_atom_dirty(ctx, &ctx->prefetch_L2);
+
+ /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
+ ctx->framebuffer.dirty_cbufs =
+ u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
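+ /* e.g. nr_cbufs == 2 yields dirty_cbufs == 0x3; u_bit_consecutive(0, n)
+ * sets the n lowest bits. */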
+ /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
+ ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
+ /* This should always be marked as dirty so that, at the very least,
+ * the framebuffer scissor state gets emitted. */
si_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
si_mark_atom_dirty(ctx, &ctx->clip_regs);
- si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
- si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs);
+ /* CLEAR_STATE sets zeros. */
+ if (ctx->clip_state.any_nonzeros)
+ si_mark_atom_dirty(ctx, &ctx->clip_state.atom);
+ ctx->msaa_sample_locs.nr_samples = 0;
+ si_mark_atom_dirty(ctx, &ctx->msaa_sample_locs.atom);
si_mark_atom_dirty(ctx, &ctx->msaa_config);
- si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
+ /* CLEAR_STATE sets 0xffff. */
+ if (ctx->sample_mask.sample_mask != 0xffff)
+ si_mark_atom_dirty(ctx, &ctx->sample_mask.atom);
si_mark_atom_dirty(ctx, &ctx->cb_render_state);
- si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
+ /* CLEAR_STATE sets zeros. */
+ if (ctx->blend_color.any_nonzeros)
+ si_mark_atom_dirty(ctx, &ctx->blend_color.atom);
si_mark_atom_dirty(ctx, &ctx->db_render_state);
si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
si_mark_atom_dirty(ctx, &ctx->spi_map);
si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
-
- ctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- ctx->viewports.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
- si_mark_atom_dirty(ctx, &ctx->scissors.atom);
- si_mark_atom_dirty(ctx, &ctx->viewports.atom);
+ si_all_resident_buffers_begin_new_cs(ctx);
+
+ ctx->b.scissors.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ ctx->b.viewports.depth_range_dirty_mask = (1 << R600_MAX_VIEWPORTS) - 1;
+ si_mark_atom_dirty(ctx, &ctx->b.scissors.atom);
+ si_mark_atom_dirty(ctx, &ctx->b.viewports.atom);
+
+ si_mark_atom_dirty(ctx, &ctx->scratch_state);
+ if (ctx->scratch_buffer) {
+ r600_context_add_resource_size(&ctx->b.b,
+ &ctx->scratch_buffer->b.b);
+ }
r600_postflush_resume_features(&ctx->b);
- ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->cdw;
+ assert(!ctx->b.gfx.cs->prev_dw);
+ ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
/* Invalidate various draw states so that they are emitted before
* the first draw call. */
si_invalidate_draw_sh_constants(ctx);
+ ctx->last_index_size = -1;
ctx->last_primitive_restart_en = -1;
ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
ctx->last_gs_out_prim = -1;
ctx->last_prim = -1;
ctx->last_multi_vgt_param = -1;
- ctx->last_ls_hs_config = -1;
ctx->last_rast_prim = -1;
ctx->last_sc_line_stipple = ~0;
- ctx->emit_scratch_reloc = true;
+ ctx->last_vs_state = ~0;
ctx->last_ls = NULL;
ctx->last_tcs = NULL;
ctx->last_tes_sh_base = -1;
ctx->last_num_tcs_input_cp = -1;
+
+ ctx->cs_shader_state.initialized = false;
}