@@ … @@ freedreno_batch.c: fd_batch_flush()
fd_batch_flush(struct fd_batch *batch)
{
    struct fd_batch *tmp = NULL;
-    bool newbatch = false;
    /* NOTE: we need to hold an extra ref across the body of flush,
     * since the last ref to this batch could be dropped when cleaning
     * up used_resources
     */
    fd_batch_reference(&tmp, batch);
-    if (batch == batch->ctx->batch) {
-        batch->ctx->batch = NULL;
-        newbatch = true;
-    }
-
    batch_flush(tmp);
-    if (newbatch) {
-        struct fd_context *ctx = batch->ctx;
-        struct fd_batch *new_batch;
-
-        if (ctx->screen->reorder) {
-            /* defer allocating new batch until one is needed for rendering
-             * to avoid unused batches for apps that create many contexts
-             */
-            new_batch = NULL;
-        } else {
-            new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
-            util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
-        }
-
-        fd_batch_reference(&batch, NULL);
-        ctx->batch = new_batch;
-        fd_context_all_dirty(ctx);
+    if (batch == batch->ctx->batch) {
+        fd_batch_reference(&batch->ctx->batch, NULL);
    }
    fd_batch_reference(&tmp, NULL);
}
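For clarity, here is roughly what fd_batch_flush() looks like once this hunk is applied. This is a reconstruction from the context lines above, not part of the patch itself, and the void return type is assumed (it is not visible in the hunk):

/* Sketch (not part of the patch): the post-change flush path.  Rather
 * than eagerly allocating a replacement batch, flushing just drops the
 * context's reference; a new batch is created on demand later.
 */
void
fd_batch_flush(struct fd_batch *batch)
{
    struct fd_batch *tmp = NULL;

    /* hold an extra ref across the body of flush, since the last ref
     * to this batch could be dropped when cleaning up used_resources
     */
    fd_batch_reference(&tmp, batch);
    batch_flush(tmp);
    if (batch == batch->ctx->batch)
        fd_batch_reference(&batch->ctx->batch, NULL);
    fd_batch_reference(&tmp, NULL);
}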
@@ … @@ freedreno_batch_cache.c: fd_bc_alloc_batch()
    struct fd_batch *flush_batch = NULL;
    for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
-        if ((cache->batches[i] == ctx->batch) ||
-                !cache->batches[i]->needs_flush)
-            continue;
        if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
            fd_batch_reference_locked(&flush_batch, cache->batches[i]);
    }
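With the continue-guard removed, the scan is a plain minimum search: when the cache is full, the batch with the lowest seqno, i.e. the oldest one, is chosen to be flushed, and ctx->batch needs no special-casing since it may not exist at all anymore. A self-contained model of that selection follows; the struct is a stub and pick_oldest() is a hypothetical name, not driver code:

/* Sketch (not part of the patch): oldest-batch selection by seqno. */
#include <stddef.h>

struct fd_batch { unsigned seqno; };   /* stub, not the real struct */

static struct fd_batch *
pick_oldest(struct fd_batch **batches, size_t n)
{
    struct fd_batch *oldest = NULL;
    for (size_t i = 0; i < n; i++) {
        if (!batches[i])
            continue;   /* empty cache slot */
        if (!oldest || batches[i]->seqno < oldest->seqno)
            oldest = batches[i];
    }
    return oldest;
}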
@@ … @@ freedreno_context.c: fd_context_init()
    if (!pctx->stream_uploader)
        goto fail;
    pctx->const_uploader = pctx->stream_uploader;
-    if (!ctx->screen->reorder)
-        ctx->batch = fd_bc_alloc_batch(&screen->batch_cache, ctx, false);
-
    slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
    fd_draw_init(pctx);
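Because fd_context_init() no longer allocates a batch eagerly, something has to create one lazily when rendering actually starts. That helper is not shown in this excerpt, so the following is only a sketch of the idea; the name fd_context_batch() and the framebuffer copy are assumptions:

/* Sketch (not part of the patch as shown): on-demand batch allocation.
 * Helper name and details are assumed; the hunks above imply only that
 * ctx->batch may now be NULL until first use.
 */
static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
    if (!ctx->batch) {
        ctx->batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
        /* the fresh batch picks up the context's current framebuffer state */
        util_copy_framebuffer_state(&ctx->batch->framebuffer, &ctx->framebuffer);
    }
    return ctx->batch;
}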
DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
fd_batch_flush(ctx->batch);
- util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
}
ctx->dirty |= FD_DIRTY_FRAMEBUFFER;
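Dropping the util_copy_framebuffer_state() call means the batch no longer snapshots framebuffer state at set-time; marking FD_DIRTY_FRAMEBUFFER is what makes the next draw pick the new state up through the lazily created batch. One plausible shape for this code path once ctx->batch can be NULL; this is a sketch, not the actual upstream change:

/* Sketch (not part of the patch): guarding the flush when the context
 * may not own a batch yet.
 */
if (ctx->batch) {
    DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
            framebuffer->cbufs[0], framebuffer->zsbuf);
    fd_batch_flush(ctx->batch);
}
ctx->dirty |= FD_DIRTY_FRAMEBUFFER;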