if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                     resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->gfx.flush(ctx, 0, NULL);
+ si_flush_gfx_cs(ctx, 0, NULL);
busy = true;
}
}
if (radeon_emitted(ctx->dma.cs, 0) &&
    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                     resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
- ctx->dma.flush(ctx, 0, NULL);
+ si_flush_dma_cs(ctx, 0, NULL);
busy = true;
}
}
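/* Caller-side view of the two hunks above (a hedged sketch; the Gallium
 * transfer API shown is the era's upstream interface, not part of this
 * patch): a map with PIPE_TRANSFER_DONTBLOCK returns NULL instead of
 * stalling when a ring still references the buffer, after kicking off
 * an asynchronous flush so a retry is likely to succeed:
 *
 *	void *ptr = pipe->transfer_map(pipe, buf, 0,
 *				       PIPE_TRANSFER_WRITE |
 *				       PIPE_TRANSFER_DONTBLOCK,
 *				       &box, &transfer);
 *	if (!ptr) {
 *		// GPU still busy: fall back to a staging upload
 *		// or retry after the async flush completes.
 *	}
 */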
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
    ((dst && ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
                                              RADEON_USAGE_READWRITE)) ||
     (src &&
      ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                       RADEON_USAGE_WRITE))))
- ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
 * is too large.
 */
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
!radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
}
ctx->num_dma_calls++;
}
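/* The flush condition above bounds each DMA IB three ways: the packet
 * must fit, the IB must stay under 64 MB of referenced VRAM+GTT, and
 * the total memory must stay below the winsys limit. A sketch of what
 * radeon_cs_memory_below_limit checks (the spill rule and the exact
 * headroom factor are assumptions, not taken from this patch):
 *
 *	vram += cs->used_vram;
 *	gtt += cs->used_gart;
 *	// anything above the VRAM size spills into GTT
 *	if (vram > screen->info.vram_size)
 *		gtt += vram - screen->info.vram_size;
 *	// keep headroom so the kernel never has to evict
 *	return gtt < screen->info.gart_size * 0.7;
 */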
-static void r600_flush_dma_ring(void *ctx, unsigned flags,
- struct pipe_fence_handle **fence)
+void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct radeon_winsys_cs *cs = rctx->dma.cs;
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
ctx->ws->cs_sync_flush(ctx->dma.cs);
if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
- r600_flush_dma_ring,
+ si_flush_dma_cs,
rctx);
- rctx->dma.flush = r600_flush_dma_ring;
+ rctx->dma.flush = si_flush_dma_cs;
}
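/* Note that the rename does not change how the winsys drives flushes:
 * cs_create still registers si_flush_dma_cs as the callback, and
 * rctx->dma.flush keeps pointing at it for any remaining indirect
 * callers. Only driver-internal call sites switch to calling the
 * function directly, which is what requires exporting it through the
 * header below.
 */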
return true;
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
                struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);
+void si_flush_dma_cs(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct si_screen *sscreen);
* The result is that the kernel memory manager is never a bottleneck.
*/
if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
- rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(rctx, PIPE_FLUSH_ASYNC, NULL);
rctx->num_alloc_tex_transfer_bytes = 0;
}
if (unlikely(!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
                                           ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
ctx->b.gtt = 0;
/* If the CS is sufficiently large, don't count the space needed
 * and just flush if there is not enough space left.
 */
if (!ctx->b.ws->cs_check_space(cs, 2048))
- ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
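/* Usage sketch (hypothetical caller, not from this patch): reserving
 * worst-case space up front guarantees a flush never lands mid-packet:
 *
 *	si_need_cs_space(sctx);
 *	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
 */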
-void si_context_gfx_flush(void *context, unsigned flags,
- struct pipe_fence_handle **fence)
+void si_flush_gfx_cs(void *context, unsigned flags,
+ struct pipe_fence_handle **fence)
{
struct si_context *ctx = context;
struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
*/
if (radeon_emitted(ctx->b.dma.cs, 0)) {
assert(fence == NULL); /* internal flushes only */
- ctx->b.dma.flush(ctx, flags, NULL);
+ si_flush_dma_cs(ctx, flags, NULL);
}
ctx->gfx_flush_in_progress = true;
}
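/* Ordering note for the hunk above: when the driver flushes the GFX IB
 * internally, any pending DMA IB is flushed first so SDMA work the GFX
 * IB depends on is submitted ahead of it. The assert documents the
 * invariant that such internal flushes never carry a fence; the state
 * tracker path flushes both rings itself and merges the fences.
 */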
sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
- si_context_gfx_flush, sctx);
- sctx->b.gfx.flush = si_context_gfx_flush;
+ si_flush_gfx_cs, sctx);
+ sctx->b.gfx.flush = si_flush_gfx_cs;
/* Border colors. */
sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
/* si_hw_context.c */
void si_destroy_saved_cs(struct si_saved_cs *scs);
-void si_context_gfx_flush(void *context, unsigned flags,
- struct pipe_fence_handle **fence);
+void si_flush_gfx_cs(void *context, unsigned flags,
+ struct pipe_fence_handle **fence);
void si_begin_new_cs(struct si_context *ctx);
void si_need_cs_space(struct si_context *ctx);
/* Flush the context to re-emit both init_config states. */
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
/* Set ring bindings. */
if (sctx->esgs_ring) {
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
}
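/* A note on the "force flush" idiom above: si_flush_gfx_cs() returns
 * early when radeon_emitted(cs, initial_gfx_cs_size) is false, i.e.
 * when the IB holds nothing beyond the initial state. Zeroing
 * initial_gfx_cs_size first makes the preamble count as pending work,
 * so the IB is really submitted and init_config is re-emitted into the
 * next one.
 */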
/**