fd_resource(info->dst.resource)->valid = true;
batch->needs_flush = true;
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
return true;
fd_resource(info->dst.resource)->valid = true;
batch->needs_flush = true;
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
return true;
}
}
- fd6_event_write(batch, ring, 0x1d, true);
+ fd6_event_write(batch, ring, UNK_1D, true);
}
static void
struct fd_context *ctx = batch->ctx;
unsigned size = 0;
- if (ctx->screen->reorder)
- util_queue_fence_init(&batch->flush_fence);
-
/* if kernel is too old to support unlimited # of cmd buffers, we
* have no option but to allocate large worst-case sizes so that
* we don't need to grow the ringbuffer. Performance is likely to
fd_hw_sample_reference(batch->ctx, &samp, NULL);
}
util_dynarray_fini(&batch->samples);
-
- if (batch->ctx->screen->reorder)
- util_queue_fence_destroy(&batch->flush_fence);
}
static void
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false);
+ fd_batch_flush(dep);
fd_batch_reference(&dep, NULL);
}
{
DBG("%p", batch);
- fd_batch_sync(batch);
-
batch_flush_reset_dependencies(batch, false);
batch_reset_resources(batch);
sprintf(buf, "fd_batch<%u>", batch->seqno);
}
-void
-fd_batch_sync(struct fd_batch *batch)
-{
- if (!batch->ctx->screen->reorder)
- return;
- util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
- struct fd_batch *batch = job;
-
- DBG("%p", batch);
-
- fd_gmem_render_tiles(batch);
- batch_reset_resources(batch);
-}
-
-static void
-batch_cleanup_func(void *job, int id)
-{
- struct fd_batch *batch = job;
- fd_batch_reference(&batch, NULL);
-}
-
static void
batch_flush(struct fd_batch *batch)
{
fd_fence_ref(&batch->ctx->last_fence, batch->fence);
- if (batch->ctx->screen->reorder) {
- struct fd_batch *tmp = NULL;
- fd_batch_reference(&tmp, batch);
-
- if (!util_queue_is_initialized(&batch->ctx->flush_queue))
- util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
-
- util_queue_add_job(&batch->ctx->flush_queue,
- batch, &batch->flush_fence,
- batch_flush_func, batch_cleanup_func, 0);
- } else {
- fd_gmem_render_tiles(batch);
- batch_reset_resources(batch);
- }
+ fd_gmem_render_tiles(batch);
+ batch_reset_resources(batch);
debug_assert(batch->reference.count > 0);
* a fence to sync on
*/
void
-fd_batch_flush(struct fd_batch *batch, bool sync)
+fd_batch_flush(struct fd_batch *batch)
{
struct fd_batch *tmp = NULL;
bool newbatch = false;
fd_context_all_dirty(ctx);
}
- if (sync)
- fd_batch_sync(tmp);
-
fd_batch_reference(&tmp, NULL);
}
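
Taken together, the two hunks above leave the flush path fully synchronous. A minimal sketch of the resulting helper, assembled only from the context lines visible in this diff (the parts of the function not shown here are omitted):

    static void
    batch_flush(struct fd_batch *batch)
    {
    	/* publish this batch's fence as the context's most recent flush: */
    	fd_fence_ref(&batch->ctx->last_fence, batch->fence);

    	/* with the flush-queue removed, tile rendering and resource
    	 * cleanup run inline on the caller's thread:
    	 */
    	fd_gmem_render_tiles(batch);
    	batch_reset_resources(batch);

    	debug_assert(batch->reference.count > 0);
    }

Every remaining fd_batch_flush() call site in this diff therefore behaves like the old sync=true case: by the time the call returns, the batch has been handed to the kernel.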
fd_batch_reference_locked(&b, rsc->write_batch);
mtx_unlock(&b->ctx->screen->lock);
- fd_batch_flush(b, true);
+ fd_batch_flush(b);
mtx_lock(&b->ctx->screen->lock);
fd_bc_invalidate_batch(b, false);
debug_assert(!batch->flushed);
if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
- fd_batch_flush(batch, true);
+ fd_batch_flush(batch);
return;
}
struct fd_ringbuffer *ring = batch->draw;
if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
- fd_batch_flush(batch, true);
+ fd_batch_flush(batch);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
struct fd_context *ctx;
- struct util_queue_fence flush_fence;
-
/* do we need to mem2gmem before rendering. We don't, if for example,
* there was a glClear() that invalidated the entire previous buffer
* contents. Keep track of which buffer(s) are cleared, or needs
struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
void fd_batch_reset(struct fd_batch *batch);
-void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync);
+void fd_batch_flush(struct fd_batch *batch);
void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
fd_context_unlock(ctx);
for (unsigned i = 0; i < n; i++) {
- fd_batch_flush(batches[i], false);
+ fd_batch_flush(batches[i]);
}
}
*/
mtx_unlock(&ctx->screen->lock);
DBG("%p: too many batches! flush forced!", flush_batch);
- fd_batch_flush(flush_batch, true);
+ fd_batch_flush(flush_batch);
mtx_lock(&ctx->screen->lock);
/* While the resources get cleaned up automatically, the flush_batch
batch->needs_out_fence_fd = true;
if (!ctx->screen->reorder) {
- fd_batch_flush(batch, true);
+ fd_batch_flush(batch);
} else if (flags & PIPE_FLUSH_DEFERRED) {
fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
} else {
fd_fence_ref(&ctx->last_fence, NULL);
- if (ctx->screen->reorder && util_queue_is_initialized(&ctx->flush_queue))
- util_queue_destroy(&ctx->flush_queue);
-
util_copy_framebuffer_state(&ctx->framebuffer, NULL);
fd_batch_reference(&ctx->batch, NULL); /* unref current batch */
fd_bc_invalidate_context(ctx);
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct util_queue flush_queue;
-
struct blitter_context *blitter;
void *clear_rs_state;
struct primconvert_context *primconvert;
batch->needs_flush = true;
ctx->launch_grid(ctx, info);
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch);
fd_batch_reference(&ctx->batch, save_batch);
fd_context_all_dirty(ctx);
static void fence_flush(struct pipe_fence_handle *fence)
{
if (fence->batch)
- fd_batch_flush(fence->batch, true);
+ fd_batch_flush(fence->batch);
debug_assert(!fence->batch);
}
* spin forever:
*/
if (aq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false);
+ fd_batch_flush(rsc->write_batch);
return false;
}
}
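
Both query back-ends guard against callers that poll a result with wait=false in a tight loop (the fd_hw_query hunk further down repeats the same pattern). A simplified sketch of the idea; the "result not yet available" condition is outside this diff, so the result_pending name below is purely illustrative:

    /* non-blocking query-result path: don't flush on every poll, but
     * after a handful of failed polls force a flush of the batch that
     * writes the result, otherwise an unflushed batch would never
     * complete and the caller could spin forever.
     */
    if (!wait) {
    	/* result_pending is a placeholder for the real readiness check */
    	if (result_pending && aq->no_wait_cnt++ > 5)
    		fd_batch_flush(rsc->write_batch);
    	return false;
    }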
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(rsc->write_batch);
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
* spin forever:
*/
if (hq->no_wait_cnt++ > 5)
- fd_batch_flush(rsc->write_batch, false);
+ fd_batch_flush(rsc->write_batch);
return false;
}
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->write_batch)
- fd_batch_flush(rsc->write_batch, true);
+ fd_batch_flush(rsc->write_batch);
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)
mtx_unlock(&ctx->screen->lock);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask)
- fd_batch_flush(batch, false);
+ fd_batch_flush(batch);
foreach_batch(batch, &ctx->screen->batch_cache, batch_mask) {
- fd_batch_sync(batch);
fd_batch_reference(&batches[batch->idx], NULL);
}
assert(rsc->batch_mask == 0);
} else if (write_batch) {
- fd_batch_flush(write_batch, true);
+ fd_batch_flush(write_batch);
}
fd_batch_reference(&write_batch, NULL);
if (usage & PIPE_TRANSFER_READ) {
fd_blit_to_staging(ctx, trans);
- struct fd_batch *batch = NULL;
-
- fd_context_lock(ctx);
- fd_batch_reference_locked(&batch, staging_rsc->write_batch);
- fd_context_unlock(ctx);
-
- /* we can't fd_bo_cpu_prep() until the blit to staging
- * is submitted to kernel.. in that case write_batch
- * wouldn't be NULL yet:
- */
- if (batch) {
- fd_batch_sync(batch);
- fd_batch_reference(&batch, NULL);
- }
-
fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
DRM_FREEDRENO_PREP_READ);
}
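
The removed comment explained why the extra wait existed: with the reorder flush-queue, the staging blit's batch could still be sitting in the queue, so fd_bo_cpu_prep() could run before the cmdstream reached the kernel. With flushes now inline, that window is gone. A sketch of the resulting read path, with that reasoning folded into a comment (the code itself is just the two calls kept by this hunk):

    if (usage & PIPE_TRANSFER_READ) {
    	/* once the blit path has flushed its batch (see the blitter
    	 * hunks at the top of this diff), the flush completes before
    	 * returning, so fd_bo_cpu_prep() alone is enough to wait for
    	 * the GPU:
    	 */
    	fd_blit_to_staging(ctx, trans);

    	fd_bo_cpu_prep(staging_rsc->bo, ctx->pipe,
    			DRM_FREEDRENO_PREP_READ);
    }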
* multiple times to the same surface), so we might as
* well go ahead and flush this one:
*/
- fd_batch_flush(old_batch, false);
+ fd_batch_flush(old_batch);
}
fd_batch_reference(&old_batch, NULL);
} else {
DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
- fd_batch_flush(ctx->batch, false);
+ fd_batch_flush(ctx->batch);
util_copy_framebuffer_state(&ctx->batch->framebuffer, cso);
}