#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
-static void
-batch_init(struct fd_batch *batch)
+static struct fd_ringbuffer *
+alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags)
{
struct fd_context *ctx = batch->ctx;
- unsigned size = 0;
-
- if (ctx->screen->reorder)
- util_queue_fence_init(&batch->flush_fence);
/* if kernel is too old to support unlimited # of cmd buffers, we
* have no option but to allocate large worst-case sizes so that
* we don't need to grow the ringbuffer. Performance is likely to
* suffer, but there is no good alternative.
*
- * XXX I think we can just require new enough kernel for this?
+ * Otherwise, if supported, allocate a growable ring with an initial
+ * size of zero.
*/
- if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
- (fd_mesa_debug & FD_DBG_NOGROW)){
- size = 0x100000;
+ if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) &&
+ !(fd_mesa_debug & FD_DBG_NOGROW)){
+ flags |= FD_RINGBUFFER_GROWABLE;
+ sz = 0;
}
+ return fd_submit_new_ringbuffer(batch->submit, sz, flags);
+}
+
+static void
+batch_init(struct fd_batch *batch)
+{
+ struct fd_context *ctx = batch->ctx;
+
batch->submit = fd_submit_new(ctx->pipe);
if (batch->nondraw) {
- batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
+ batch->draw = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
} else {
- batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
- batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_GROWABLE);
+ batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY);
+ batch->draw = alloc_ring(batch, 0x100000, 0);
+ /* a6xx+ re-uses the draw ring for both the draw and binning passes: */
if (ctx->screen->gpu_id < 600) {
- batch->binning = fd_submit_new_ringbuffer(batch->submit,
- size, FD_RINGBUFFER_GROWABLE);
+ batch->binning = alloc_ring(batch, 0x100000, 0);
}
}
batch->fence = fd_fence_create(batch);
batch->cleared = 0;
+ batch->fast_cleared = 0;
batch->invalidated = 0;
batch->restore = batch->resolve = 0;
batch->needs_flush = false;
batch->flushed = false;
batch->gmem_reason = 0;
batch->num_draws = 0;
+ batch->num_vertices = 0;
+ batch->num_bins_per_pipe = 0;
+ batch->prim_strm_bits = 0;
+ batch->draw_strm_bits = 0;
batch->stage = FD_STAGE_NULL;
fd_reset_wfi(batch);
util_dynarray_init(&batch->draw_patches, NULL);
+ util_dynarray_init(&batch->fb_read_patches, NULL);
+
+ if (is_a2xx(ctx->screen)) {
+ util_dynarray_init(&batch->shader_patches, NULL);
+ util_dynarray_init(&batch->gmem_patches, NULL);
+ }
if (is_a3xx(ctx->screen))
util_dynarray_init(&batch->rbrc_patches, NULL);
- util_dynarray_init(&batch->gmem_patches, NULL);
-
assert(batch->resources->entries == 0);
util_dynarray_init(&batch->samples, NULL);
+
+ list_inithead(&batch->log_chunks);
}
struct fd_batch *
batch_init(batch);
+ fd_screen_assert_locked(ctx->screen);
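+ /* when batch debugging is enabled, track the new batch in the screen's
+ * set of live batches:
+ */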
+ if (BATCH_DEBUG) {
+ _mesa_set_add(ctx->screen->live_batches, batch);
+ }
+
return batch;
}
/* in case batch wasn't flushed but fence was created: */
fd_fence_populate(batch->fence, 0, -1);
- fd_fence_ref(NULL, &batch->fence, NULL);
+ fd_fence_ref(&batch->fence, NULL);
fd_ringbuffer_del(batch->draw);
if (!batch->nondraw) {
debug_assert(!batch->binning);
debug_assert(!batch->gmem);
}
+
if (batch->lrz_clear) {
fd_ringbuffer_del(batch->lrz_clear);
batch->lrz_clear = NULL;
}
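+
+ /* likewise free the optional epilogue and tile setup/fini rings, if
+ * they were allocated:
+ */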
+ if (batch->epilogue) {
+ fd_ringbuffer_del(batch->epilogue);
+ batch->epilogue = NULL;
+ }
+
+ if (batch->tile_setup) {
+ fd_ringbuffer_del(batch->tile_setup);
+ batch->tile_setup = NULL;
+ }
+
+ if (batch->tile_fini) {
+ fd_ringbuffer_del(batch->tile_fini);
+ batch->tile_fini = NULL;
+ }
+
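+ /* if tessellation was used, free the tess factor/param BOs and the
+ * tess_addrs_constobj ring:
+ */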
+ if (batch->tessellation) {
+ fd_bo_del(batch->tessfactor_bo);
+ fd_bo_del(batch->tessparam_bo);
+ fd_ringbuffer_del(batch->tess_addrs_constobj);
+ }
+
fd_submit_del(batch->submit);
util_dynarray_fini(&batch->draw_patches);
+ util_dynarray_fini(&batch->fb_read_patches);
+
+ if (is_a2xx(batch->ctx->screen)) {
+ util_dynarray_fini(&batch->shader_patches);
+ util_dynarray_fini(&batch->gmem_patches);
+ }
if (is_a3xx(batch->ctx->screen))
util_dynarray_fini(&batch->rbrc_patches);
- util_dynarray_fini(&batch->gmem_patches);
-
while (batch->samples.size > 0) {
struct fd_hw_sample *samp =
util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
}
util_dynarray_fini(&batch->samples);
- if (batch->ctx->screen->reorder)
- util_queue_fence_destroy(&batch->flush_fence);
+ assert(list_is_empty(&batch->log_chunks));
}
static void
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false, false);
+ fd_batch_flush(dep);
fd_batch_reference(&dep, NULL);
}
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_screen_assert_locked(batch->ctx->screen);
set_foreach(batch->resources, entry) {
struct fd_resource *rsc = (struct fd_resource *)entry->key;
static void
batch_reset_resources(struct fd_batch *batch)
{
- mtx_lock(&batch->ctx->screen->lock);
+ fd_screen_lock(batch->ctx->screen);
batch_reset_resources_locked(batch);
- mtx_unlock(&batch->ctx->screen->lock);
+ fd_screen_unlock(batch->ctx->screen);
}
static void
{
DBG("%p", batch);
- fd_batch_sync(batch);
-
batch_flush_reset_dependencies(batch, false);
batch_reset_resources(batch);
fd_context_assert_locked(batch->ctx);
+ if (BATCH_DEBUG) {
+ _mesa_set_remove_key(ctx->screen->live_batches, batch);
+ }
+
fd_bc_invalidate_batch(batch, true);
batch_reset_resources_locked(batch);
void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
- util_sprintf(buf, "fd_batch<%u>", batch->seqno);
-}
-
-void
-fd_batch_sync(struct fd_batch *batch)
-{
- if (!batch->ctx->screen->reorder)
- return;
- util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
- struct fd_batch *batch = job;
-
- DBG("%p", batch);
-
- fd_gmem_render_tiles(batch);
- batch_reset_resources(batch);
-}
-
-static void
-batch_cleanup_func(void *job, int id)
-{
- struct fd_batch *batch = job;
- fd_batch_reference(&batch, NULL);
+ sprintf(buf, "fd_batch<%u>", batch->seqno);
}
static void
-batch_flush(struct fd_batch *batch, bool force)
+batch_flush(struct fd_batch *batch)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
batch->flushed = true;
- if (batch->ctx->screen->reorder) {
- struct fd_batch *tmp = NULL;
- fd_batch_reference(&tmp, batch);
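+ /* hold a reference to this batch's fence as the context's last_fence
+ * (presumably so a later flush with no rendering still has a fence to
+ * hand back):
+ */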
+ fd_fence_ref(&batch->ctx->last_fence, batch->fence);
- if (!util_queue_is_initialized(&batch->ctx->flush_queue))
- util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
-
- util_queue_add_job(&batch->ctx->flush_queue,
- batch, &batch->flush_fence,
- batch_flush_func, batch_cleanup_func);
- } else {
- fd_gmem_render_tiles(batch);
- batch_reset_resources(batch);
- }
+ fd_gmem_render_tiles(batch);
+ batch_reset_resources(batch);
debug_assert(batch->reference.count > 0);
- mtx_lock(&batch->ctx->screen->lock);
+ fd_screen_lock(batch->ctx->screen);
fd_bc_invalidate_batch(batch, false);
- mtx_unlock(&batch->ctx->screen->lock);
+ fd_screen_unlock(batch->ctx->screen);
}
/* NOTE: could drop the last ref to batch
- *   a fence to sync on
*/
void
-fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
+fd_batch_flush(struct fd_batch *batch)
{
struct fd_batch *tmp = NULL;
- bool newbatch = false;
/* NOTE: we need to hold an extra ref across the body of flush,
 * since the last ref to this batch could be dropped when cleaning up
*/
fd_batch_reference(&tmp, batch);
- if (batch == batch->ctx->batch) {
- batch->ctx->batch = NULL;
- newbatch = true;
- }
-
- batch_flush(tmp, force);
-
- if (newbatch) {
- struct fd_context *ctx = batch->ctx;
- struct fd_batch *new_batch;
-
- if (ctx->screen->reorder) {
- /* defer allocating new batch until one is needed for rendering
- * to avoid unused batches for apps that create many contexts
- */
- new_batch = NULL;
- } else {
- new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
- util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
- }
+ batch_flush(tmp);
- fd_batch_reference(&batch, NULL);
- ctx->batch = new_batch;
- fd_context_all_dirty(ctx);
+ if (batch == batch->ctx->batch) {
+ fd_batch_reference(&batch->ctx->batch, NULL);
}
- if (sync)
- fd_batch_sync(tmp);
-
fd_batch_reference(&tmp, NULL);
}
-/* does 'batch' depend directly or indirectly on 'other' ? */
-static bool
-batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
+/* find a batch's dependents mask, including recursive dependencies: */
+static uint32_t
+recursive_dependents_mask(struct fd_batch *batch)
{
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
-
- if (batch->dependents_mask & (1 << other->idx))
- return true;
+ uint32_t dependents_mask = batch->dependents_mask;
foreach_batch(dep, cache, batch->dependents_mask)
- if (batch_depends_on(batch, dep))
- return true;
+ dependents_mask |= recursive_dependents_mask(dep);
- return false;
+ return dependents_mask;
}
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
+ fd_screen_assert_locked(batch->ctx->screen);
+
if (batch->dependents_mask & (1 << dep->idx))
return;
/* a loop should not be possible */
- debug_assert(!batch_depends_on(dep, batch));
+ debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
flush_write_batch(struct fd_resource *rsc)
{
struct fd_batch *b = NULL;
- fd_batch_reference(&b, rsc->write_batch);
+ fd_batch_reference_locked(&b, rsc->write_batch);
- mtx_unlock(&b->ctx->screen->lock);
- fd_batch_flush(b, true, false);
- mtx_lock(&b->ctx->screen->lock);
+ fd_screen_unlock(b->ctx->screen);
+ fd_batch_flush(b);
+ fd_screen_lock(b->ctx->screen);
fd_bc_invalidate_batch(b, false);
fd_batch_reference_locked(&b, NULL);
}
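+
+/* track 'rsc' in the batch's resource set and mark this batch in the
+ * resource's batch_mask; no-op if the resource is already tracked:
+ */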
+static void
+fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
+{
+ if (likely(fd_batch_references_resource(batch, rsc))) {
+ debug_assert(_mesa_set_search(batch->resources, rsc));
+ return;
+ }
+
+ debug_assert(!_mesa_set_search(batch->resources, rsc));
+
+ _mesa_set_add(batch->resources, rsc);
+ rsc->batch_mask |= (1 << batch->idx);
+}
+
void
-fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
+fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
- pipe_mutex_assert_locked(batch->ctx->screen->lock);
+ fd_screen_assert_locked(batch->ctx->screen);
if (rsc->stencil)
- fd_batch_resource_used(batch, rsc->stencil, write);
+ fd_batch_resource_write(batch, rsc->stencil);
- DBG("%p: %s %p", batch, write ? "write" : "read", rsc);
+ DBG("%p: write %p", batch, rsc);
- if (write)
- rsc->valid = true;
+ rsc->valid = true;
/* note, invalidate write batch, to avoid further writes to rsc
* resulting in a write-after-read hazard.
*/
+ /* if we are pending read or write by any other batch: */
+ if (unlikely(rsc->batch_mask & ~(1 << batch->idx))) {
+ struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
+ struct fd_batch *dep;
- if (write) {
- /* if we are pending read or write by any other batch: */
- if (rsc->batch_mask & ~(1 << batch->idx)) {
- struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
- struct fd_batch *dep;
-
- if (rsc->write_batch && rsc->write_batch != batch)
- flush_write_batch(rsc);
-
- foreach_batch(dep, cache, rsc->batch_mask) {
- struct fd_batch *b = NULL;
- if (dep == batch)
- continue;
- /* note that batch_add_dep could flush and unref dep, so
- * we need to hold a reference to keep it live for the
- * fd_bc_invalidate_batch()
- */
- fd_batch_reference(&b, dep);
- fd_batch_add_dep(batch, b);
- fd_bc_invalidate_batch(b, false);
- fd_batch_reference_locked(&b, NULL);
- }
- }
- fd_batch_reference_locked(&rsc->write_batch, batch);
- } else {
- /* If reading a resource pending a write, go ahead and flush the
- * writer. This avoids situations where we end up having to
- * flush the current batch in _resource_used()
- */
if (rsc->write_batch && rsc->write_batch != batch)
flush_write_batch(rsc);
- }
- if (rsc->batch_mask & (1 << batch->idx)) {
- debug_assert(_mesa_set_search(batch->resources, rsc));
- return;
+ foreach_batch(dep, cache, rsc->batch_mask) {
+ struct fd_batch *b = NULL;
+ if (dep == batch)
+ continue;
+ /* note that batch_add_dep could flush and unref dep, so
+ * we need to hold a reference to keep it live for the
+ * fd_bc_invalidate_batch()
+ */
+ fd_batch_reference(&b, dep);
+ fd_batch_add_dep(batch, b);
+ fd_bc_invalidate_batch(b, false);
+ fd_batch_reference_locked(&b, NULL);
+ }
}
+ fd_batch_reference_locked(&rsc->write_batch, batch);
- debug_assert(!_mesa_set_search(batch->resources, rsc));
+ fd_batch_add_resource(batch, rsc);
+}
- _mesa_set_add(batch->resources, rsc);
- rsc->batch_mask |= (1 << batch->idx);
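+/* slow path for read tracking; the expectation is that an inline
+ * fd_batch_resource_read() fast path skips this when the resource is
+ * already referenced by the batch:
+ */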
+void
+fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
+{
+ fd_screen_assert_locked(batch->ctx->screen);
+
+ if (rsc->stencil)
+ fd_batch_resource_read(batch, rsc->stencil);
+
+ DBG("%p: read %p", batch, rsc);
+
+ /* If reading a resource pending a write, go ahead and flush the
+ * writer. This avoids situations where we end up having to
+ * flush the current batch in _resource_used()
+ */
+ if (unlikely(rsc->write_batch && rsc->write_batch != batch))
+ flush_write_batch(rsc);
+
+ fd_batch_add_resource(batch, rsc);
}
void
debug_assert(!batch->flushed);
if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch);
return;
}
struct fd_ringbuffer *ring = batch->draw;
if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already