X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Ffreedreno_batch.c;h=86e8b5b7834e038a6ceead8193a670fd8a03e051;hp=6d17a422fc44ef91ae547f6c2085455b1f167083;hb=28079970f2ce151673701c84ad765a235ab5ba50;hpb=eeafaf2d37cdc7f83f997e8babd8f770243ecf25 diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c index 6d17a422fc4..86e8b5b7834 100644 --- a/src/gallium/drivers/freedreno/freedreno_batch.c +++ b/src/gallium/drivers/freedreno/freedreno_batch.c @@ -25,109 +25,496 @@ */ #include "util/list.h" +#include "util/set.h" +#include "util/hash_table.h" #include "util/u_string.h" #include "freedreno_batch.h" #include "freedreno_context.h" +#include "freedreno_fence.h" #include "freedreno_resource.h" +#include "freedreno_query_hw.h" + +static struct fd_ringbuffer * +alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags) +{ + struct fd_context *ctx = batch->ctx; + + /* if kernel is too old to support unlimited # of cmd buffers, we + * have no option but to allocate large worst-case sizes so that + * we don't need to grow the ringbuffer. Performance is likely to + * suffer, but there is no good alternative. + * + * Otherwise if supported, allocate a growable ring with initial + * size of zero. + */ + if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) && + !(fd_mesa_debug & FD_DBG_NOGROW)){ + flags |= FD_RINGBUFFER_GROWABLE; + sz = 0; + } + + return fd_submit_new_ringbuffer(batch->submit, sz, flags); +} + +static void +batch_init(struct fd_batch *batch) +{ + struct fd_context *ctx = batch->ctx; + + batch->submit = fd_submit_new(ctx->pipe); + if (batch->nondraw) { + batch->draw = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY); + } else { + batch->gmem = alloc_ring(batch, 0x100000, FD_RINGBUFFER_PRIMARY); + batch->draw = alloc_ring(batch, 0x100000, 0); + + /* a6xx+ re-uses draw rb for both draw and binning pass: */ + if (ctx->screen->gpu_id < 600) { + batch->binning = alloc_ring(batch, 0x100000, 0); + } + } + + batch->in_fence_fd = -1; + batch->fence = fd_fence_create(batch); + + batch->cleared = 0; + batch->fast_cleared = 0; + batch->invalidated = 0; + batch->restore = batch->resolve = 0; + batch->needs_flush = false; + batch->flushed = false; + batch->gmem_reason = 0; + batch->num_draws = 0; + batch->num_vertices = 0; + batch->num_bins_per_pipe = 0; + batch->prim_strm_bits = 0; + batch->draw_strm_bits = 0; + batch->stage = FD_STAGE_NULL; + + fd_reset_wfi(batch); + + util_dynarray_init(&batch->draw_patches, NULL); + util_dynarray_init(&batch->fb_read_patches, NULL); + + if (is_a2xx(ctx->screen)) { + util_dynarray_init(&batch->shader_patches, NULL); + util_dynarray_init(&batch->gmem_patches, NULL); + } + + if (is_a3xx(ctx->screen)) + util_dynarray_init(&batch->rbrc_patches, NULL); + + assert(batch->resources->entries == 0); + + util_dynarray_init(&batch->samples, NULL); + + list_inithead(&batch->log_chunks); +} struct fd_batch * -fd_batch_create(struct fd_context *ctx) +fd_batch_create(struct fd_context *ctx, bool nondraw) { struct fd_batch *batch = CALLOC_STRUCT(fd_batch); - static unsigned seqno = 0; - unsigned size = 0; if (!batch) return NULL; + DBG("%p", batch); + pipe_reference_init(&batch->reference, 1); - batch->seqno = ++seqno; batch->ctx = ctx; + batch->nondraw = nondraw; - /* if kernel is too old to support unlimited # of cmd buffers, we - * have no option but to allocate large worst-case sizes so that - * we don't need to grow the 
ringbuffer. Performance is likely to - * suffer, but there is no good alternative. - */ - if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) { - size = 0x100000; + batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer, + _mesa_key_pointer_equal); + + batch_init(batch); + + fd_screen_assert_locked(ctx->screen); + if (BATCH_DEBUG) { + _mesa_set_add(ctx->screen->live_batches, batch); } - batch->draw = fd_ringbuffer_new(ctx->screen->pipe, size); - batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size); - batch->gmem = fd_ringbuffer_new(ctx->screen->pipe, size); + return batch; +} + +static void +batch_fini(struct fd_batch *batch) +{ + DBG("%p", batch); - fd_ringbuffer_set_parent(batch->gmem, NULL); - fd_ringbuffer_set_parent(batch->draw, batch->gmem); - fd_ringbuffer_set_parent(batch->binning, batch->gmem); + pipe_resource_reference(&batch->query_buf, NULL); - list_inithead(&batch->used_resources); + if (batch->in_fence_fd != -1) + close(batch->in_fence_fd); - return batch; + /* in case batch wasn't flushed but fence was created: */ + fd_fence_populate(batch->fence, 0, -1); + + fd_fence_ref(&batch->fence, NULL); + + fd_ringbuffer_del(batch->draw); + if (!batch->nondraw) { + if (batch->binning) + fd_ringbuffer_del(batch->binning); + fd_ringbuffer_del(batch->gmem); + } else { + debug_assert(!batch->binning); + debug_assert(!batch->gmem); + } + + if (batch->lrz_clear) { + fd_ringbuffer_del(batch->lrz_clear); + batch->lrz_clear = NULL; + } + + if (batch->epilogue) { + fd_ringbuffer_del(batch->epilogue); + batch->epilogue = NULL; + } + + if (batch->tile_setup) { + fd_ringbuffer_del(batch->tile_setup); + batch->tile_setup = NULL; + } + + if (batch->tile_fini) { + fd_ringbuffer_del(batch->tile_fini); + batch->tile_fini = NULL; + } + + if (batch->tessellation) { + fd_bo_del(batch->tessfactor_bo); + fd_bo_del(batch->tessparam_bo); + fd_ringbuffer_del(batch->tess_addrs_constobj); + } + + fd_submit_del(batch->submit); + + util_dynarray_fini(&batch->draw_patches); + util_dynarray_fini(&batch->fb_read_patches); + + if (is_a2xx(batch->ctx->screen)) { + util_dynarray_fini(&batch->shader_patches); + util_dynarray_fini(&batch->gmem_patches); + } + + if (is_a3xx(batch->ctx->screen)) + util_dynarray_fini(&batch->rbrc_patches); + + while (batch->samples.size > 0) { + struct fd_hw_sample *samp = + util_dynarray_pop(&batch->samples, struct fd_hw_sample *); + fd_hw_sample_reference(batch->ctx, &samp, NULL); + } + util_dynarray_fini(&batch->samples); + + assert(list_is_empty(&batch->log_chunks)); +} + +static void +batch_flush_reset_dependencies(struct fd_batch *batch, bool flush) +{ + struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache; + struct fd_batch *dep; + + foreach_batch(dep, cache, batch->dependents_mask) { + if (flush) + fd_batch_flush(dep); + fd_batch_reference(&dep, NULL); + } + + batch->dependents_mask = 0; +} + +static void +batch_reset_resources_locked(struct fd_batch *batch) +{ + fd_screen_assert_locked(batch->ctx->screen); + + set_foreach(batch->resources, entry) { + struct fd_resource *rsc = (struct fd_resource *)entry->key; + _mesa_set_remove(batch->resources, entry); + debug_assert(rsc->batch_mask & (1 << batch->idx)); + rsc->batch_mask &= ~(1 << batch->idx); + if (rsc->write_batch == batch) + fd_batch_reference_locked(&rsc->write_batch, NULL); + } +} + +static void +batch_reset_resources(struct fd_batch *batch) +{ + fd_screen_lock(batch->ctx->screen); + batch_reset_resources_locked(batch); + fd_screen_unlock(batch->ctx->screen); +} + +static void 
+batch_reset(struct fd_batch *batch) +{ + DBG("%p", batch); + + batch_flush_reset_dependencies(batch, false); + batch_reset_resources(batch); + + batch_fini(batch); + batch_init(batch); +} + +void +fd_batch_reset(struct fd_batch *batch) +{ + if (batch->needs_flush) + batch_reset(batch); } void __fd_batch_destroy(struct fd_batch *batch) { - fd_ringbuffer_del(batch->draw); - fd_ringbuffer_del(batch->binning); - fd_ringbuffer_del(batch->gmem); + struct fd_context *ctx = batch->ctx; + + DBG("%p", batch); + + fd_context_assert_locked(batch->ctx); + + if (BATCH_DEBUG) { + _mesa_set_remove_key(ctx->screen->live_batches, batch); + } + + fd_bc_invalidate_batch(batch, true); + + batch_reset_resources_locked(batch); + debug_assert(batch->resources->entries == 0); + _mesa_set_destroy(batch->resources, NULL); + + fd_context_unlock(ctx); + batch_flush_reset_dependencies(batch, false); + debug_assert(batch->dependents_mask == 0); + util_copy_framebuffer_state(&batch->framebuffer, NULL); + batch_fini(batch); free(batch); + fd_context_lock(ctx); } void __fd_batch_describe(char* buf, const struct fd_batch *batch) { - util_sprintf(buf, "fd_batch<%u>", batch->seqno); + sprintf(buf, "fd_batch<%u>", batch->seqno); } +static void +batch_flush(struct fd_batch *batch) +{ + DBG("%p: needs_flush=%d", batch, batch->needs_flush); + + if (batch->flushed) + return; + + batch->needs_flush = false; + + /* close out the draw cmds by making sure any active queries are + * paused: + */ + fd_batch_set_stage(batch, FD_STAGE_NULL); + + batch_flush_reset_dependencies(batch, true); + + batch->flushed = true; + + fd_fence_ref(&batch->ctx->last_fence, batch->fence); + + fd_gmem_render_tiles(batch); + batch_reset_resources(batch); + + debug_assert(batch->reference.count > 0); + + fd_screen_lock(batch->ctx->screen); + fd_bc_invalidate_batch(batch, false); + fd_screen_unlock(batch->ctx->screen); +} + +/* NOTE: could drop the last ref to batch + * + * @sync: synchronize with flush_queue, ensures batch is *actually* flushed + * to kernel before this returns, as opposed to just being queued to be + * flushed + * @force: force a flush even if no rendering, mostly useful if you need + * a fence to sync on + */ void fd_batch_flush(struct fd_batch *batch) { - struct fd_resource *rsc, *rsc_tmp; + struct fd_batch *tmp = NULL; + + /* NOTE: we need to hold an extra ref across the body of flush, + * since the last ref to this batch could be dropped when cleaning + * up used_resources + */ + fd_batch_reference(&tmp, batch); - fd_gmem_render_tiles(batch->ctx); + batch_flush(tmp); - /* go through all the used resources and clear their reading flag */ - LIST_FOR_EACH_ENTRY_SAFE(rsc, rsc_tmp, &batch->used_resources, list) { - debug_assert(rsc->pending_batch == batch); - debug_assert(rsc->status != 0); - rsc->status = 0; - fd_batch_reference(&rsc->pending_batch, NULL); - list_delinit(&rsc->list); + if (batch == batch->ctx->batch) { + fd_batch_reference(&batch->ctx->batch, NULL); } - assert(LIST_IS_EMPTY(&batch->used_resources)); + fd_batch_reference(&tmp, NULL); +} + +/* find a batches dependents mask, including recursive dependencies: */ +static uint32_t +recursive_dependents_mask(struct fd_batch *batch) +{ + struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache; + struct fd_batch *dep; + uint32_t dependents_mask = batch->dependents_mask; + + foreach_batch(dep, cache, batch->dependents_mask) + dependents_mask |= recursive_dependents_mask(dep); + + return dependents_mask; } void -fd_batch_resource_used(struct fd_batch *batch, struct 
fd_resource *rsc, - enum fd_resource_status status) +fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep) +{ + fd_screen_assert_locked(batch->ctx->screen); + + if (batch->dependents_mask & (1 << dep->idx)) + return; + + /* a loop should not be possible */ + debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep))); + + struct fd_batch *other = NULL; + fd_batch_reference_locked(&other, dep); + batch->dependents_mask |= (1 << dep->idx); + DBG("%p: added dependency on %p", batch, dep); +} + +static void +flush_write_batch(struct fd_resource *rsc) { - rsc->status |= status; + struct fd_batch *b = NULL; + fd_batch_reference_locked(&b, rsc->write_batch); + + fd_screen_unlock(b->ctx->screen); + fd_batch_flush(b); + fd_screen_lock(b->ctx->screen); + + fd_bc_invalidate_batch(b, false); + fd_batch_reference_locked(&b, NULL); +} + +static void +fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc) +{ + + if (likely(fd_batch_references_resource(batch, rsc))) { + debug_assert(_mesa_set_search(batch->resources, rsc)); + return; + } + + debug_assert(!_mesa_set_search(batch->resources, rsc)); + + _mesa_set_add(batch->resources, rsc); + rsc->batch_mask |= (1 << batch->idx); +} + +void +fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc) +{ + fd_screen_assert_locked(batch->ctx->screen); + + if (rsc->stencil) + fd_batch_resource_write(batch, rsc->stencil); + + DBG("%p: write %p", batch, rsc); + + rsc->valid = true; + + /* note, invalidate write batch, to avoid further writes to rsc + * resulting in a write-after-read hazard. + */ + /* if we are pending read or write by any other batch: */ + if (unlikely(rsc->batch_mask & ~(1 << batch->idx))) { + struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache; + struct fd_batch *dep; + + if (rsc->write_batch && rsc->write_batch != batch) + flush_write_batch(rsc); + + foreach_batch(dep, cache, rsc->batch_mask) { + struct fd_batch *b = NULL; + if (dep == batch) + continue; + /* note that batch_add_dep could flush and unref dep, so + * we need to hold a reference to keep it live for the + * fd_bc_invalidate_batch() + */ + fd_batch_reference(&b, dep); + fd_batch_add_dep(batch, b); + fd_bc_invalidate_batch(b, false); + fd_batch_reference_locked(&b, NULL); + } + } + fd_batch_reference_locked(&rsc->write_batch, batch); + + fd_batch_add_resource(batch, rsc); +} + +void +fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc) +{ + fd_screen_assert_locked(batch->ctx->screen); if (rsc->stencil) - rsc->stencil->status |= status; + fd_batch_resource_read(batch, rsc->stencil); - /* TODO resources can actually be shared across contexts, - * so I'm not sure a single list-head will do the trick? + DBG("%p: read %p", batch, rsc); + + /* If reading a resource pending a write, go ahead and flush the + * writer. 
This avoids situations where we end up having to + * flush the current batch in _resource_used() */ - debug_assert((rsc->pending_batch == batch) || !rsc->pending_batch); - list_delinit(&rsc->list); - list_addtail(&rsc->list, &batch->used_resources); - fd_batch_reference(&rsc->pending_batch, batch); + if (unlikely(rsc->write_batch && rsc->write_batch != batch)) + flush_write_batch(rsc); + + fd_batch_add_resource(batch, rsc); } void fd_batch_check_size(struct fd_batch *batch) { + debug_assert(!batch->flushed); + + if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) { + fd_batch_flush(batch); + return; + } + if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) return; struct fd_ringbuffer *ring = batch->draw; - if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) || - (fd_mesa_debug & FD_DBG_FLUSH)) - fd_context_render(&batch->ctx->base); + if ((ring->cur - ring->start) > (ring->size/4 - 0x1000)) + fd_batch_flush(batch); +} + +/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already + * been one since last draw: + */ +void +fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring) +{ + if (batch->needs_wfi) { + if (batch->ctx->screen->gpu_id >= 500) + OUT_WFI5(ring); + else + OUT_WFI(ring); + batch->needs_wfi = false; + } }
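The patch above replaces the old per-batch used_resources list with screen-wide tracking: each fd_resource carries a batch_mask of batches that reference it plus a write_batch pointer, and a batch records which other batches must be flushed before it in dependents_mask. The sketch below is not part of the patch; it shows roughly how a caller on the draw path would mark the resources it touches while holding the screen lock. The helper name and the vbuf/zsbuf locals are hypothetical, and fd_batch_resource_read() is assumed to be the inline fast path declared in freedreno_batch.h that falls through to fd_batch_resource_read_slowpath() above.

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"

/* Hypothetical caller-side sketch: mark one resource as read and one as
 * written by the current batch.  Both calls require the screen lock.
 */
static void
mark_draw_resources(struct fd_context *ctx, struct fd_batch *batch,
                    struct fd_resource *vbuf, struct fd_resource *zsbuf)
{
   fd_screen_lock(ctx->screen);

   /* records the batch in vbuf->batch_mask; if some other batch has a
    * pending write to vbuf, that writer is flushed first:
    */
   fd_batch_resource_read(batch, vbuf);

   /* additionally takes over zsbuf->write_batch and records a dependency
    * on any other batch still using zsbuf, so those batches get flushed
    * before this one:
    */
   fd_batch_resource_write(batch, zsbuf);

   fd_screen_unlock(ctx->screen);
}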
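fd_batch_check_size() is meant to be called from the draw path after commands have been emitted: with FD_DBG_FLUSH it flushes unconditionally, and on kernels without FD_VERSION_UNLIMITED_CMDS (where alloc_ring() could not make the ring growable) it flushes before the fixed-size ring fills up. A rough, hypothetical call-site sketch follows; draw_vbo_internal() is only a placeholder for the driver's real draw hook.

#include "freedreno_batch.h"
#include "freedreno_context.h"

static void
draw_vbo_internal(struct fd_context *ctx, struct fd_batch *batch)
{
   batch->needs_flush = true;

   /* ... emit state and draw commands into batch->draw ... */

   /* flush early when FD_DBG_FLUSH is set, or when the kernel cannot
    * grow command buffers and batch->draw is close to full:
    */
   fd_batch_check_size(batch);
}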
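fd_wfi() wraps the WAIT_FOR_IDLE packet so that it is only emitted when batch->needs_wfi is set (fd_reset_wfi() re-arms it, as in batch_init() above). The hypothetical sketch below shows how generation-specific code might use it before writing a register that requires an idle GPU; OUT_PKT0() is assumed to be the a2xx-a4xx type-0 packet macro from freedreno_util.h, and the reg/value parameters are placeholders.

#include "freedreno_batch.h"
#include "freedreno_util.h"

static void
emit_idle_sensitive_reg(struct fd_batch *batch, struct fd_ringbuffer *ring,
                        uint16_t reg, uint32_t value)
{
   /* emits OUT_WFI/OUT_WFI5 only if no wait-for-idle has been emitted
    * since the last draw, then clears batch->needs_wfi:
    */
   fd_wfi(batch, ring);

   OUT_PKT0(ring, reg, 1);
   OUT_RING(ring, value);
}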