if (radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size) &&
    sctx->ws->cs_is_buffer_referenced(sctx->gfx_cs,
				      resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
} else {
- si_flush_gfx_cs(sctx, 0, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
busy = true;
}
}
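/* For reference: the flush flags used throughout this patch. A minimal
 * sketch of what radeon_winsys.h is assumed to provide; the exact bit
 * position is an assumption, not quoted from this patch. The ASYNC variant
 * simply ORs Gallium's PIPE_FLUSH_ASYNC into the new "start the next gfx
 * IB immediately" hint, so a single constant can replace the old
 * PIPE_FLUSH_ASYNC and plain-0 flag combinations below. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW	(1u << 31)
#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
	(PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)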
if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs,
res->buf, RADEON_USAGE_READWRITE)) {
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
if (radeon_emitted(ctx->dma_cs, 0) &&
    ctx->ws->cs_is_buffer_referenced(ctx->dma_cs,
				     res->buf, RADEON_USAGE_READWRITE)) {
	si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
}
if (radeon_emitted(ctx->gfx_cs, ctx->initial_gfx_cs_size) &&
    ((dst &&
      ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, dst->buf,
				       RADEON_USAGE_READWRITE)) ||
     (src &&
      ctx->ws->cs_is_buffer_referenced(ctx->gfx_cs, src->buf,
				       RADEON_USAGE_WRITE))))
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
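/* Note that the si_flush_dma_cs call above keeps plain PIPE_FLUSH_ASYNC:
 * the new flag is (as its name says) specific to starting the next *gfx*
 * IB, so only gfx flushes are converted in this patch. */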
/* Flush if there's not enough space, or if the memory usage per IB
 * is too large.
 */
 * This means we need to flush for such fences even when we're
 * not going to wait.
 */
threaded_context_unwrap_sync(ctx);
- si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx,
+ (timeout ? 0 : PIPE_FLUSH_ASYNC) |
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
+ NULL);
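/* The flush above drops PIPE_FLUSH_ASYNC when a nonzero timeout forces a
 * real wait, but still passes RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
 * presumably so that command recording can continue in the new IB while
 * this thread waits on the fence. */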
rfence->gfx_unflushed.ctx = NULL;
if (!timeout)
	return false;
if (unlikely(!radeon_cs_memory_below_limit(ctx->screen, ctx->gfx_cs,
					    ctx->vram, ctx->gtt))) {
ctx->gtt = 0;
ctx->vram = 0;
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return;
}
ctx->gtt = 0;
ctx->vram = 0;

/* If the IB is sufficiently large, don't count the space needed
 * and just flush if there is not enough space left.
 *
 * Also reserve space for stopping queries at the end of IB, because
 * the number of active queries is mostly unlimited.
 */
unsigned need_dwords = 2048 + ctx->num_cs_dw_queries_suspend;
if (!ctx->ws->cs_check_space(cs, need_dwords))
- si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
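/* From here on, the flags reach the winsys through si_flush_gfx_cs, which
 * (assuming the usual call shape) forwards them verbatim:
 *
 *	ctx->ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
 *
 * That is how the amdgpu/radeon winsys hunks further down see the new
 * RADEON_FLUSH_*_START_NEXT_GFX_IB_NOW bits. */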
void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
		     struct pipe_fence_handle **fence)
if (check_mem &&
    !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
sctx->vram + rbo->vram_usage,
sctx->gtt + rbo->gart_usage))
- si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs, rbo, usage, priority);
}
/* Flush the context to re-emit both init_config states. */
sctx->initial_gfx_cs_size = 0; /* force flush */
- si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
/* Set ring bindings. */
if (sctx->esgs_ring) {
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->initial_gfx_cs_size = 0; /* force flush */
- si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
/**
* The result is that the kernel memory manager is never a bottleneck.
*/
if (sctx->num_alloc_tex_transfer_bytes > sctx->screen->info.gart_size / 4) {
- si_flush_gfx_cs(sctx, PIPE_FLUSH_ASYNC, NULL);
+ si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
sctx->num_alloc_tex_transfer_bytes = 0;
}
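/* The flush above is driven by a threshold (a quarter of GART worth of
 * texture-transfer allocations); presumably submitting early lets the
 * kernel start reclaiming those transfer buffers, which is what keeps the
 * kernel memory manager from becoming a bottleneck. */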
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
}
} else {
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
if (cs) {
if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
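/* This map path blocks anyway (amdgpu_bo_wait with
 * PIPE_TIMEOUT_INFINITE), so the flush stays synchronous here; relative
 * to the old flags=0 call, only the start-next-IB hint is added. */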
} else {
/* Try to avoid busy-waiting in amdgpu_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
	amdgpu_cs_sync_flush(rcs);
/* Mapping for write. */
if (cs) {
if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
} else {
/* Try to avoid busy-waiting in amdgpu_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
	amdgpu_cs_sync_flush(rcs);
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
- cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
}
} else {
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
return NULL;
}
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
}
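/* Same pattern as the amdgpu winsys above: the caller blocks in
 * radeon_bo_wait right after this, so the synchronous flush variant is
 * kept and only the start-next-IB hint is new. */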
radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
RADEON_USAGE_WRITE);
/* Mapping for write. */
if (cs) {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, 0, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
} else {
/* Try to avoid busy-waiting in radeon_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
	radeon_drm_cs_sync_flush(rcs);
/* Flush if there are any relocs. Clean up otherwise. */
if (cs->csc->num_relocs) {
- cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data,
+ RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
} else {
radeon_cs_context_cleanup(cs->csc);
cs->base.used_vram = 0;