#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
-static void
-batch_init(struct fd_batch *batch)
+static struct fd_ringbuffer *
+alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
struct fd_context *ctx = batch->ctx;
- enum fd_ringbuffer_flags flags = 0;
- unsigned size = 0;
/* if kernel is too old to support unlimited # of cmd buffers, we
* have no option but to allocate large worst-case sizes so that
* we don't need to grow the ringbuffer. Performance is likely to
* suffer, but there is no good alternative.
*
- * XXX I think we can just require new enough kernel for this?
+	 * Otherwise, if the kernel supports it, allocate a growable ring
+	 * with an initial size of zero.
*/
- if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
- (fd_mesa_debug & FD_DBG_NOGROW)){
- size = 0x100000;
- } else {
- flags = FD_RINGBUFFER_GROWABLE;
+ if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
+ !(fd_mesa_debug & FD_DBG_NOGROW)){
+ flags |= FD_RINGBUFFER_GROWABLE;
+ sz = 0;
}
+ return fd_submit_new_ringbuffer(batch->submit, sz, flags);
+}
+
+static void
+batch_init(struct fd_batch *batch)
+{
+ struct fd_context *ctx = batch->ctx;
+
batch->submit = fd_submit_new(ctx->pipe);
if (batch->nondraw) {
- batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | flags);
+ batch->draw = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
} else {
- batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | flags);
- batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- flags);
+ batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
+ batch->draw = alloc_ring(batch, 0x100000, 0);
+ /* a6xx+ re-uses draw rb for both draw and binning pass: */
if (ctx->screen->gpu_id < 600) {
- batch->binning = fd_submit_new_ringbuffer(batch->submit,
- size, flags);
+ batch->binning = alloc_ring(batch, 0x100000, 0);
}
}
batch_init(batch);
+ fd_screen_assert_locked(ctx->screen);
+ if (BATCH_DEBUG) {
+ _mesa_set_add(ctx->screen->live_batches, batch);
+ }
+
return batch;
}
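
For reference, the live_batches set used above is not created anywhere in this excerpt; a minimal sketch of the screen-side setup it presumably relies on, assuming the util/set pointer-hashing helpers (util/set.h) and that this runs once during screen creation:

	/* Assumed, not part of this patch: per-screen set tracking live
	 * batches, allocated only when BATCH_DEBUG is enabled.
	 */
	if (BATCH_DEBUG) {
		screen->live_batches = _mesa_set_create(NULL, _mesa_hash_pointer,
				_mesa_key_pointer_equal);
	}
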
batch->lrz_clear = NULL;
}
+ if (batch->epilogue) {
+ fd_ringbuffer_del(batch->epilogue);
+ batch->epilogue = NULL;
+ }
+
if (batch->tile_setup) {
fd_ringbuffer_del(batch->tile_setup);
batch->tile_setup = NULL;
fd_context_assert_locked(batch->ctx);
+ if (BATCH_DEBUG) {
+ _mesa_set_remove_key(ctx->screen->live_batches, batch);
+ }
+
fd_bc_invalidate_batch(batch, true);
batch_reset_resources_locked(batch);
fd_batch_reference_locked(&b, NULL);
}
-static bool
-fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
-{
- return rsc->batch_mask & (1 << batch->idx);
-}
-
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
}
void
-fd_batch_resource_read(struct fd_batch *batch, struct fd_resource *rsc)
+fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
fd_screen_assert_locked(batch->ctx->screen);
- /* Early out, if we hit this then we know we don't have anyone else
- * writing to it (since both _write and _read flush other writers), and
- * that we've already recursed for stencil.
- */
- if (likely(fd_batch_references_resource(batch, rsc)))
- return;
-
if (rsc->stencil)
fd_batch_resource_read(batch, rsc->stencil);
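
The early-out removed here, together with the rename to fd_batch_resource_read_slowpath, suggests the fast path moves to an inline wrapper in the header so the common already-referenced case skips the locked slow path. A minimal sketch of what such a wrapper could look like, assuming it lives in freedreno_batch.h and reusing the batch_mask test from the deleted fd_batch_references_resource (not part of this diff):

	/* Assumed, not part of this patch: inline fast path in the header. */
	static inline void
	fd_batch_resource_read(struct fd_batch *batch, struct fd_resource *rsc)
	{
		/* Early out: if the batch already references the resource, no other
		 * batch is writing it (both _write and _read flush other writers),
		 * and stencil has already been handled; only fall back to the
		 * locked slow path otherwise.
		 */
		if (unlikely(!(rsc->batch_mask & (1 << batch->idx))))
			fd_batch_resource_read_slowpath(batch, rsc);
	}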