{
/* Flush the DMA IB if it's not empty. */
if (radeon_emitted(ctx->b.dma.cs, 0))
- ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
/* All memory usage will be accounted for once the relocations are emitted. */
/* Flush if there's not enough space. */
if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
}
radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
}
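+ /* A CS partial flush waits for outstanding compute shader work,
+ * mirroring the PS partial flush above. */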
+ if (rctx->b.flags & R600_CONTEXT_CS_PARTIAL_FLUSH) {
+ radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
+ radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
+ }
+
if (wait_until) {
/* Use of WAIT_UNTIL is deprecated on Cayman+ */
if (rctx->b.family < CHIP_CAYMAN) {
if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size))
return;
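+ /* Bail out early if the device has been reset (e.g. after a GPU
+ * hang); there is no point in submitting more work. */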
+ if (r600_check_device_reset(&ctx->b))
+ return;
+
r600_preflush_suspend_features(&ctx->b);
/* flush the framebuffer cache */
r600_flush_emit(ctx);
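+ /* Write a trace ID for this IB so a later hang can be narrowed
+ * down; trace_buf only exists for debug contexts. */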
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
/* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
if (ctx->b.chip_class == R600) {
radeon_set_context_reg(cs, R_028350_SX_MISC, 0);
}
+ if (ctx->is_debug) {
+ /* Save the IB for debug contexts. */
+ radeon_clear_saved_cs(&ctx->last_gfx);
+ radeon_save_cs(ws, cs, &ctx->last_gfx, true);
+ r600_resource_reference(&ctx->last_trace_buf, ctx->trace_buf);
+ r600_resource_reference(&ctx->trace_buf, NULL);
+ }
/* Flush the CS. */
ws->cs_flush(cs, flags, &ctx->b.last_gfx_fence);
if (fence)
ws->fence_reference(fence, ctx->b.last_gfx_fence);
ctx->b.num_gfx_cs_flushes++;
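+ /* Debug contexts: wait for the flushed IB to complete. If the wait
+ * times out, assume a hang and dump the saved debug state to the
+ * file named by the R600_TRACE environment variable. */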
+ if (ctx->is_debug) {
+ if (!ws->fence_wait(ws, ctx->b.last_gfx_fence, 10000000)) {
+ const char *fname = getenv("R600_TRACE");
+ if (!fname)
+ exit(-1);
+ FILE *fl = fopen(fname, "w+");
+ if (fl) {
+ eg_dump_debug_state(&ctx->b.b, fl, 0);
+ fclose(fl);
+ } else
+ perror(fname);
+ exit(-1);
+ }
+ }
r600_begin_new_cs(ctx);
}
{
unsigned shader;
+ if (ctx->is_debug) {
+ uint32_t zero = 0;
+
+ /* Create a buffer used for writing trace IDs and initialize it to 0. */
+ assert(!ctx->trace_buf);
+ ctx->trace_buf = (struct r600_resource*)
+ pipe_buffer_create(ctx->b.b.screen, 0,
+ PIPE_USAGE_STAGING, 4);
+ if (ctx->trace_buf)
+ pipe_buffer_write_nooverlap(&ctx->b.b, &ctx->trace_buf->b.b,
+ 0, sizeof(zero), &zero);
+ ctx->trace_id = 0;
+ }
+
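+ /* Emit the first trace point of the new CS. */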
+ if (ctx->trace_buf)
+ eg_trace_emit(ctx);
+
ctx->b.flags = 0;
ctx->b.gtt = 0;
ctx->b.vram = 0;
r600_mark_atom_dirty(ctx, &ctx->db_misc_state.atom);
r600_mark_atom_dirty(ctx, &ctx->db_state.atom);
r600_mark_atom_dirty(ctx, &ctx->framebuffer.atom);
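+ /* The image and shader buffer atoms only exist on Evergreen and later. */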
+ if (ctx->b.chip_class >= EVERGREEN) {
+ r600_mark_atom_dirty(ctx, &ctx->fragment_images.atom);
+ r600_mark_atom_dirty(ctx, &ctx->fragment_buffers.atom);
+ r600_mark_atom_dirty(ctx, &ctx->compute_images.atom);
+ r600_mark_atom_dirty(ctx, &ctx->compute_buffers.atom);
+ }
r600_mark_atom_dirty(ctx, &ctx->hw_shader_stages[R600_HW_STAGE_PS].atom);
r600_mark_atom_dirty(ctx, &ctx->poly_offset_state.atom);
r600_mark_atom_dirty(ctx, &ctx->vgt_state.atom);
/* Re-emit the draw state. */
ctx->last_primitive_type = -1;
ctx->last_start_instance = -1;
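+ /* Also reset the cached rasterizer primitive type so it is
+ * re-derived and re-emitted on the next draw. */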
+ ctx->last_rast_prim = -1;
+ ctx->current_rast_prim = -1;
assert(!ctx->b.gfx.cs->prev_dw);
ctx->b.initial_gfx_cs_size = ctx->b.gfx.cs->current.cdw;
&offset, (struct pipe_resource**)&buf);
if (!buf) {
/* This is too heavyweight, but will work. */
- rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
src_offset += csize << 2;
size -= csize;
}
- r600_dma_emit_wait_idle(&rctx->b);
}