r300_get_num_cs_end_dwords(r300);
    /* Reserve CS space. */
-    if (!r300->rws->cs_check_space(r300->cs, dwords)) {
+    if (!r300->rws->cs_check_space(r300->cs, dwords, false)) {
        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
    }
    cs_dwords += r300_get_num_cs_end_dwords(r300);

    /* Reserve requested CS space. */
-    if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
+    if (!r300->rws->cs_check_space(r300->cs, cs_dwords, false)) {
        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
        flushed = TRUE;
    }
   num_dw += 10;

   /* Flush if there's not enough space. */
-   if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
+   if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw, false)) {
      ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
   }
}
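
Every caller touched here follows the same reserve-or-flush idiom: ask the winsys whether dw more dwords still fit into the current IB and, if not, submit the IB asynchronously and start a fresh one. A minimal sketch of that idiom, with placeholder names (only the trailing false, meaning "no forced chaining", is new in this patch):

   /* Sketch of the reserve-or-flush idiom shared by the callers in this
    * patch. "my_context", "cs" and "flush" are placeholders, not Mesa API. */
   static void reserve_cs_space(struct my_context *ctx, unsigned dw)
   {
      /* Passing false keeps the pre-patch behavior: no forced chaining. */
      if (!ctx->ws->cs_check_space(ctx->cs, dw, false))
         ctx->flush(ctx, PIPE_FLUSH_ASYNC, NULL);
   }
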
    * engine busy while uploads are being submitted.
    */
   num_dw++; /* for emit_wait_idle below */
-   if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
+   if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw, false) ||
       ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
       !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
      ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
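
The DMA paths flush early for more than lack of space: once the IB already references more than 64 MiB of VRAM plus GTT, or the overall memory limit check fails, the IB is submitted right away. As the truncated comment above notes, this keeps the DMA engine busy while further uploads are still being queued, instead of accumulating one oversized submission.
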
    *
    * \param cs A command stream.
    * \param dw Number of CS dwords requested by the caller.
+    * \param force_chaining Chain the IB into a new buffer now to discard
+    *                       the CP prefetch cache (to emulate PKT3_REWIND).
+    * \return true if there is enough space
    */
-   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);
+   bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw,
+                          bool force_chaining);

   /**
    * Return the buffer list.
    */
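
The new parameter lets a driver force a chain point on demand: the winsys links the current IB to a fresh buffer even though space remains, which makes the CP drop its prefetched commands. A hypothetical caller might look like this (the field names follow radeonsi conventions, but this helper is not part of the patch):

   /* Hypothetical helper, assuming a radeonsi-like context with a
    * "ws" winsys pointer and a "gfx_cs" command stream. */
   static void emulate_rewind(struct si_context *sctx)
   {
      /* force_chaining = true: chain into a new IB now so the CP
       * prefetch cache is discarded, emulating PKT3_REWIND. The call
       * cannot fail where chaining is supported (see the assert in
       * amdgpu_cs_check_space below). */
      sctx->ws->cs_check_space(sctx->gfx_cs, 0, true);
   }
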
   num_dw++; /* for emit_wait_idle below */
   if (!ctx->sdma_uploads_in_progress &&
-       (!ws->cs_check_space(ctx->dma_cs, num_dw) ||
+       (!ws->cs_check_space(ctx->dma_cs, num_dw, false) ||
        ctx->dma_cs->used_vram + ctx->dma_cs->used_gart > 64 * 1024 * 1024 ||
        !radeon_cs_memory_below_limit(ctx->screen, ctx->dma_cs, vram, gtt))) {
      si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
   ctx->vram = 0;

   unsigned need_dwords = si_get_minimum_num_gfx_cs_dwords(ctx);
-   if (!ctx->ws->cs_check_space(cs, need_dwords))
+   if (!ctx->ws->cs_check_space(cs, need_dwords, false))
      si_flush_gfx_cs(ctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
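
For the gfx CS, radeonsi flushes with RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, so the winsys can begin building the next gfx IB immediately rather than waiting for the current submission to complete.
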
   return true;
}

-static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
+static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
+                                  bool force_chaining)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   ib->max_check_space_size = MAX2(ib->max_check_space_size,
                                   safe_byte_size);
-   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
-      return false;
+   /* If force_chaining is true, we can't return. We have to chain. */
+   if (!force_chaining) {
+      if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
+         return false;

-   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
+      ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

-   if (rcs->current.max_dw - rcs->current.cdw >= dw)
-      return true;
+      if (rcs->current.max_dw - rcs->current.cdw >= dw)
+         return true;
+   }

-   if (!amdgpu_cs_has_chaining(cs))
+   if (!amdgpu_cs_has_chaining(cs)) {
+      assert(!force_chaining);
      return false;
+   }

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
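
Note the control flow when force_chaining is set: both early-out paths (the size-limit rejection and the "enough space" fast path) are skipped, so execution always falls through to the chaining code and a new chunk is linked even if the current IB still had room for dw dwords. The assert encodes the contract that force_chaining=true is only legal on a winsys where amdgpu_cs_has_chaining() holds; without chaining the request cannot be honored.
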
    return status;
}

-static bool radeon_drm_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
+static bool radeon_drm_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
+                                      bool force_chaining)
{
    assert(rcs->current.cdw <= rcs->current.max_dw);
    return rcs->current.max_dw - rcs->current.cdw >= dw;
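
The older radeon kernel driver has no IB chaining, so its winsys takes the new parameter only to keep the vtable signature uniform: force_chaining is effectively ignored, and the function still just reports whether the remaining space is sufficient. Per the assert in the amdgpu implementation, callers must not request forced chaining on such a winsys.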