fd_resource(info->dst.resource)->valid = true;
batch->needs_flush = true;
- fd_batch_flush(batch, false, false);
+ fd_batch_flush(batch, false);
fd_batch_reference(&batch, NULL);
return true;
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false, false);
+ fd_batch_flush(dep, false);
fd_batch_reference(&dep, NULL);
}
}
static void
-batch_flush(struct fd_batch *batch, bool force)
+batch_flush(struct fd_batch *batch)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
* a fence to sync on
*/
void
-fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
+fd_batch_flush(struct fd_batch *batch, bool sync)
{
struct fd_batch *tmp = NULL;
bool newbatch = false;
newbatch = true;
}
- batch_flush(tmp, force);
+ batch_flush(tmp);
if (newbatch) {
struct fd_context *ctx = batch->ctx;
fd_batch_reference_locked(&b, rsc->write_batch);
mtx_unlock(&b->ctx->screen->lock);
- fd_batch_flush(b, true, false);
+ fd_batch_flush(b, true);
mtx_lock(&b->ctx->screen->lock);
fd_bc_invalidate_batch(b, false);
debug_assert(!batch->flushed);
if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch, true);
return;
}
struct fd_ringbuffer *ring = batch->draw;
if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch, true);
}
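A worked example of the check_size threshold, assuming ring->size is in bytes while cur and start are uint32_t pointers (so the pointer subtraction counts dwords): with a 128 KiB ring, ring->size/4 is 32768 dwords, and the batch is flushed once more than 32768 - 0x1000 = 28672 dwords have been emitted, i.e. 16 KiB of headroom is kept.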
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
+void fd_batch_flush(struct fd_batch *batch, bool sync);
void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
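Callers migrate by simply dropping the trailing argument; a minimal sketch of the change at a call site (batch and sync stand in for whatever each caller already has):

    /* before: the 'force' arg (per the doc comment above fd_batch_flush(),
     * mostly useful when you need a fence to sync on); every call site in
     * this patch passed false except fence_flush()
     */
    fd_batch_flush(batch, sync, false);

    /* after: only the sync flag remains */
    fd_batch_flush(batch, sync);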
fd_context_unlock(ctx);
for (unsigned i = 0; i < n; i++) {
- fd_batch_flush(batches[i], false, false);
+ fd_batch_flush(batches[i], false);
}
}
*/
mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
- fd_batch_flush(flush_batch, true, false);
+ fd_batch_flush(flush_batch, true);
mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
batch->needs_out_fence_fd = true;
if (!ctx->screen->reorder) {
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch, true);
} else if (flags & PIPE_FLUSH_DEFERRED) {
fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
} else {
batch->needs_flush = true;
ctx->launch_grid(ctx, info);
- fd_batch_flush(batch, false, false);
+ fd_batch_flush(batch, false);
fd_batch_reference(&ctx->batch, save_batch);
fd_context_all_dirty(ctx);
static void fence_flush(struct pipe_fence_handle *fence)
{
if (fence->batch)
- fd_batch_flush(fence->batch, true, true);
+ fd_batch_flush(fence->batch, true);
debug_assert(!fence->batch);
}
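fence_flush() above was the only call site that passed force=true; with the parameter removed it relies on sync=true alone, and the debug_assert(!fence->batch) still verifies that the flush dropped the fence's reference to the batch.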
* spin forever:
*/
if (aq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false, false);
+ fd_batch_flush(rsc->write_batch, false);
return false;
}
}
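Both query paths (the acc query above and the hw query below) use the same heuristic: once a non-blocking result check has failed more than five times while rsc->write_batch is still pending, flush that batch without syncing so the result can eventually land rather than spinning forever.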
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true, false);
+ fd_batch_flush(rsc->write_batch, true);
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
* spin forever:
*/
if (hq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false, false);
+ fd_batch_flush(rsc->write_batch, false);
return false;
}
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true, false);
+ fd_batch_flush(rsc->write_batch, true);
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_flush(batch, false, false);
+ fd_batch_flush(batch, false);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
fd_batch_sync(batch);
}
assert(rsc->batch_mask == 0);
} else if (write_batch) {
- fd_batch_flush(write_batch, true, false);
+ fd_batch_flush(write_batch, true);
}
fd_batch_reference(&write_batch, NULL);
* multiple times to the same surface), so we might as
* well go ahead and flush this one:
*/
- fd_batch_flush(old_batch, false, false);
+ fd_batch_flush(old_batch, false);
}
fd_batch_reference(&old_batch, NULL);
} else {
DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
- fd_batch_flush(ctx->batch, false, false);
+ fd_batch_flush(ctx->batch, false);
util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
}