unsigned i;
/* make sure that the gfx ring is the only one active */
- if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
+ if (radeon_emitted(rctx->b.dma.cs, 0)) {
rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
}
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
boolean count_draw_in)
{
- struct radeon_winsys_cs *dma = ctx->b.dma.cs;
-
/* Flush the DMA IB if it's not empty. */
- if (dma && dma->cdw)
+ if (radeon_emitted(ctx->b.dma.cs, 0))
ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
if (!ctx->b.ws->cs_memory_below_limit(ctx->b.gfx.cs, ctx->b.vram, ctx->b.gtt)) {
struct r600_context *ctx = context;
struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
- if (cs->cdw == ctx->b.initial_gfx_cs_size && !fence)
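+ /* cs->cdw only grows between flushes and never drops below the initial
+ * IB size, so "cdw == initial size" is equivalent to "nothing emitted
+ * past the initial size". */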
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) && !fence)
return;
r600_preflush_suspend_features(&ctx->b);
}
/* make sure that the gfx ring is the only one active */
- if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
+ if (radeon_emitted(rctx->b.dma.cs, 0)) {
rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
}
if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
return TRUE;
}
- if (ctx->dma.cs && ctx->dma.cs->cdw &&
+ if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
return TRUE;
}
rusage = RADEON_USAGE_WRITE;
}
- if (ctx->gfx.cs->cdw != ctx->initial_gfx_cs_size &&
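+ /* Same invariant: with cdw never below the initial size,
+ * "cdw != initial size" means "emitted past the initial size". */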
+ if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
busy = true;
}
}
- if (ctx->dma.cs &&
- ctx->dma.cs->cdw &&
+ if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
}
/* Flush the GFX IB if DMA depends on it. */
- if (ctx->gfx.cs->cdw > ctx->initial_gfx_cs_size &&
+ if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
((dst &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
RADEON_USAGE_READWRITE)) ||
r600_need_dma_space(rctx, 1, NULL, NULL);
- if (cs->cdw == 0) /* empty queue */
+ if (!radeon_emitted(cs, 0)) /* empty queue */
return;
/* NOP waits for idle on Evergreen and later. */
struct r600_common_context *rctx = (struct r600_common_context *)ctx;
struct radeon_winsys_cs *cs = rctx->dma.cs;
- if (cs->cdw)
+ if (radeon_emitted(cs, 0))
rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
if (fence)
rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
RVID_ERR("Can't create feedback buffer.\n");
return;
}
- if (!enc->cs->cdw)
+ if (!radeon_emitted(enc->cs, 0))
enc->session(enc);
enc->encode(enc);
enc->feedback(enc);
unsigned num_registers, uint32_t *out);
};
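+/* Return true if the IB "cs" holds more than "num_dw" dwords, i.e. if
+ * commands have been emitted past that point. NULL-safe: a NULL IB is
+ * treated as empty. Pass 0 to ask whether anything at all has been
+ * emitted, or the IB's initial size to ask whether anything has been
+ * emitted since the last flush. Typical conversions in this patch:
+ *
+ *   cs && cs->cdw                  ->  radeon_emitted(cs, 0)
+ *   cs->cdw != initial_gfx_cs_size ->  radeon_emitted(cs, initial_gfx_cs_size)
+ *   cs->cdw == initial_gfx_cs_size -> !radeon_emitted(cs, initial_gfx_cs_size)
+ */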
+static inline bool radeon_emitted(struct radeon_winsys_cs *cs, unsigned num_dw)
+{
+ return cs && cs->cdw > num_dw;
+}
static inline void radeon_emit(struct radeon_winsys_cs *cs, uint32_t value)
{
struct radeon_winsys_cs *dma = ctx->b.dma.cs;
/* Flush the DMA IB if it's not empty. */
- if (dma && dma->cdw)
+ if (radeon_emitted(dma, 0))
ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
/* There are two memory usage counters in the winsys for all buffers
ctx->gfx_flush_in_progress = true;
- if (cs->cdw == ctx->b.initial_gfx_cs_size &&
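+ /* As in r600_context_gfx_flush: "cdw == initial size" means nothing
+ * has been emitted since the last flush. */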
+ if (!radeon_emitted(cs, ctx->b.initial_gfx_cs_size) &&
(!fence || ctx->last_gfx_fence)) {
if (fence)
ws->fence_reference(fence, ctx->last_gfx_fence);