X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Ffreedreno%2Ffreedreno_batch.c;h=106959b4fed9d1bfb952c94cd1cce982ebb2996e;hb=1b3aefad46bda59ff02c0d81c53fd3fbf249d8f4;hp=8dc0d7c612acb6ce0ff0bb5b32746271e9cc0463;hpb=a7fa44cd33378c6058b8069e6edcae1963c96bd3;p=mesa.git

diff --git a/src/gallium/drivers/freedreno/freedreno_batch.c b/src/gallium/drivers/freedreno/freedreno_batch.c
index 8dc0d7c612a..106959b4fed 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -39,60 +39,71 @@ static void
 batch_init(struct fd_batch *batch)
 {
 	struct fd_context *ctx = batch->ctx;
+	enum fd_ringbuffer_flags flags = 0;
 	unsigned size = 0;
 
-	if (ctx->screen->reorder)
-		util_queue_fence_init(&batch->flush_fence);
-
 	/* if kernel is too old to support unlimited # of cmd buffers, we
 	 * have no option but to allocate large worst-case sizes so that
 	 * we don't need to grow the ringbuffer.  Performance is likely to
 	 * suffer, but there is no good alternative.
+	 *
+	 * XXX I think we can just require new enough kernel for this?
 	 */
 	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
 			(fd_mesa_debug & FD_DBG_NOGROW)){
 		size = 0x100000;
+	} else {
+		flags = FD_RINGBUFFER_GROWABLE;
 	}
 
-	batch->draw    = fd_ringbuffer_new(ctx->pipe, size);
-	if (!batch->nondraw) {
-		batch->binning = fd_ringbuffer_new(ctx->pipe, size);
-		batch->gmem    = fd_ringbuffer_new(ctx->pipe, size);
-
-		fd_ringbuffer_set_parent(batch->gmem, NULL);
-		fd_ringbuffer_set_parent(batch->draw, batch->gmem);
-		fd_ringbuffer_set_parent(batch->binning, batch->gmem);
+	batch->submit = fd_submit_new(ctx->pipe);
+	if (batch->nondraw) {
+		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
+				FD_RINGBUFFER_PRIMARY | flags);
 	} else {
-		fd_ringbuffer_set_parent(batch->draw, NULL);
+		batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
+				FD_RINGBUFFER_PRIMARY | flags);
+		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
+				flags);
+
+		if (ctx->screen->gpu_id < 600) {
+			batch->binning = fd_submit_new_ringbuffer(batch->submit,
+					size, flags);
+		}
 	}
 
 	batch->in_fence_fd = -1;
 	batch->fence = fd_fence_create(batch);
 
-	batch->cleared = batch->partial_cleared = 0;
+	batch->cleared = 0;
+	batch->fast_cleared = 0;
+	batch->invalidated = 0;
 	batch->restore = batch->resolve = 0;
 	batch->needs_flush = false;
 	batch->flushed = false;
 	batch->gmem_reason = 0;
 	batch->num_draws = 0;
+	batch->num_vertices = 0;
 	batch->stage = FD_STAGE_NULL;
 
 	fd_reset_wfi(batch);
 
-	/* reset maximal bounds: */
-	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
-	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;
-
 	util_dynarray_init(&batch->draw_patches, NULL);
+	util_dynarray_init(&batch->fb_read_patches, NULL);
+
+	if (is_a2xx(ctx->screen)) {
+		util_dynarray_init(&batch->shader_patches, NULL);
+		util_dynarray_init(&batch->gmem_patches, NULL);
+	}
 
 	if (is_a3xx(ctx->screen))
 		util_dynarray_init(&batch->rbrc_patches, NULL);
 
-	util_dynarray_init(&batch->gmem_patches, NULL);
-
 	assert(batch->resources->entries == 0);
 
 	util_dynarray_init(&batch->samples, NULL);
+
+	list_inithead(&batch->log_chunks);
 }
 
 struct fd_batch *
@@ -130,28 +141,52 @@ batch_fini(struct fd_batch *batch)
 	/* in case batch wasn't flushed but fence was created: */
 	fd_fence_populate(batch->fence, 0, -1);
 
-	fd_fence_ref(NULL, &batch->fence, NULL);
+	fd_fence_ref(&batch->fence, NULL);
 
 	fd_ringbuffer_del(batch->draw);
 	if (!batch->nondraw) {
-		fd_ringbuffer_del(batch->binning);
+		if (batch->binning)
+			fd_ringbuffer_del(batch->binning);
 		fd_ringbuffer_del(batch->gmem);
 	} else {
 		debug_assert(!batch->binning);
 		debug_assert(!batch->gmem);
 	}
+
 	if (batch->lrz_clear) {
 		fd_ringbuffer_del(batch->lrz_clear);
 		batch->lrz_clear = NULL;
 	}
 
+	if (batch->tile_setup) {
+		fd_ringbuffer_del(batch->tile_setup);
+		batch->tile_setup = NULL;
+	}
+
+	if (batch->tile_fini) {
+		fd_ringbuffer_del(batch->tile_fini);
+		batch->tile_fini = NULL;
+	}
+
+	if (batch->tessellation) {
+		fd_bo_del(batch->tessfactor_bo);
+		fd_bo_del(batch->tessparam_bo);
+		fd_ringbuffer_del(batch->tess_addrs_constobj);
+	}
+
+	fd_submit_del(batch->submit);
+
 	util_dynarray_fini(&batch->draw_patches);
+	util_dynarray_fini(&batch->fb_read_patches);
+
+	if (is_a2xx(batch->ctx->screen)) {
+		util_dynarray_fini(&batch->shader_patches);
+		util_dynarray_fini(&batch->gmem_patches);
+	}
 
 	if (is_a3xx(batch->ctx->screen))
 		util_dynarray_fini(&batch->rbrc_patches);
 
-	util_dynarray_fini(&batch->gmem_patches);
-
 	while (batch->samples.size > 0) {
 		struct fd_hw_sample *samp =
 			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
@@ -159,8 +194,7 @@ batch_fini(struct fd_batch *batch)
 	}
 	util_dynarray_fini(&batch->samples);
 
-	if (batch->ctx->screen->reorder)
-		util_queue_fence_destroy(&batch->flush_fence);
+	assert(list_is_empty(&batch->log_chunks));
 }
 
 static void
@@ -171,7 +205,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 
 	foreach_batch(dep, cache, batch->dependents_mask) {
 		if (flush)
-			fd_batch_flush(dep, false, false);
+			fd_batch_flush(dep);
 		fd_batch_reference(&dep, NULL);
 	}
 
@@ -181,8 +215,6 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
 static void
 batch_reset_resources_locked(struct fd_batch *batch)
 {
-	struct set_entry *entry;
-
 	pipe_mutex_assert_locked(batch->ctx->screen->lock);
 
 	set_foreach(batch->resources, entry) {
@@ -208,8 +240,6 @@ batch_reset(struct fd_batch *batch)
 {
 	DBG("%p", batch);
 
-	fd_batch_sync(batch);
-
 	batch_flush_reset_dependencies(batch, false);
 	batch_reset_resources(batch);
 
@@ -239,10 +269,10 @@ __fd_batch_destroy(struct fd_batch *batch)
 	debug_assert(batch->resources->entries == 0);
 	_mesa_set_destroy(batch->resources, NULL);
 
+	fd_context_unlock(ctx);
 	batch_flush_reset_dependencies(batch, false);
 	debug_assert(batch->dependents_mask == 0);
 
-	fd_context_unlock(ctx);
 	util_copy_framebuffer_state(&batch->framebuffer, NULL);
 	batch_fini(batch);
 	free(batch);
@@ -252,37 +282,11 @@ __fd_batch_destroy(struct fd_batch *batch)
 void
 __fd_batch_describe(char* buf, const struct fd_batch *batch)
 {
-	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
-}
-
-void
-fd_batch_sync(struct fd_batch *batch)
-{
-	if (!batch->ctx->screen->reorder)
-		return;
-	util_queue_fence_wait(&batch->flush_fence);
-}
-
-static void
-batch_flush_func(void *job, int id)
-{
-	struct fd_batch *batch = job;
-
-	DBG("%p", batch);
-
-	fd_gmem_render_tiles(batch);
-	batch_reset_resources(batch);
+	sprintf(buf, "fd_batch<%u>", batch->seqno);
 }
 
 static void
-batch_cleanup_func(void *job, int id)
-{
-	struct fd_batch *batch = job;
-	fd_batch_reference(&batch, NULL);
-}
-
-static void
-batch_flush(struct fd_batch *batch, bool force)
+batch_flush(struct fd_batch *batch)
 {
 	DBG("%p: needs_flush=%d", batch, batch->needs_flush);
 
@@ -300,20 +304,10 @@ batch_flush(struct fd_batch *batch, bool force)
 
 	batch->flushed = true;
 
-	if (batch->ctx->screen->reorder) {
-		struct fd_batch *tmp = NULL;
-		fd_batch_reference(&tmp, batch);
-
-		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
-			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);
+	fd_fence_ref(&batch->ctx->last_fence, batch->fence);
 
-		util_queue_add_job(&batch->ctx->flush_queue,
-				batch, &batch->flush_fence,
-				batch_flush_func, batch_cleanup_func);
-	} else {
-		fd_gmem_render_tiles(batch);
-		batch_reset_resources(batch);
-	}
+	fd_gmem_render_tiles(batch);
+	batch_reset_resources(batch);
 
 	debug_assert(batch->reference.count > 0);
 
@@ -331,10 +325,9 @@ batch_flush(struct fd_batch *batch, bool force)
  * a fence to sync on
  */
 void
-fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
+fd_batch_flush(struct fd_batch *batch)
 {
 	struct fd_batch *tmp = NULL;
-	bool newbatch = false;
 
 	/* NOTE: we need to hold an extra ref across the body of flush,
 	 * since the last ref to this batch could be dropped when cleaning
@@ -342,62 +335,39 @@ fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
 	 */
 	fd_batch_reference(&tmp, batch);
 
-	if (batch == batch->ctx->batch) {
-		batch->ctx->batch = NULL;
-		newbatch = true;
-	}
-
-	batch_flush(tmp, force);
+	batch_flush(tmp);
 
-	if (newbatch) {
-		struct fd_context *ctx = batch->ctx;
-		struct fd_batch *new_batch;
-
-		if (ctx->screen->reorder) {
-			/* defer allocating new batch until one is needed for rendering
-			 * to avoid unused batches for apps that create many contexts
-			 */
-			new_batch = NULL;
-		} else {
-			new_batch = fd_batch_create(ctx, false);
-			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
-		}
-
-		fd_batch_reference(&batch, NULL);
-		ctx->batch = new_batch;
+	if (batch == batch->ctx->batch) {
+		fd_batch_reference(&batch->ctx->batch, NULL);
 	}
 
-	if (sync)
-		fd_batch_sync(tmp);
-
 	fd_batch_reference(&tmp, NULL);
 }
 
-/* does 'batch' depend directly or indirectly on 'other' ? */
-static bool
-batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
+/* find a batches dependents mask, including recursive dependencies: */
+static uint32_t
+recursive_dependents_mask(struct fd_batch *batch)
 {
 	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
 	struct fd_batch *dep;
-
-	if (batch->dependents_mask & (1 << other->idx))
-		return true;
+	uint32_t dependents_mask = batch->dependents_mask;
 
 	foreach_batch(dep, cache, batch->dependents_mask)
-		if (batch_depends_on(batch, dep))
-			return true;
+		dependents_mask |= recursive_dependents_mask(dep);
 
-	return false;
+	return dependents_mask;
 }
 
 void
 fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
 {
+	pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
 	if (batch->dependents_mask & (1 << dep->idx))
 		return;
 
 	/* a loop should not be possible */
-	debug_assert(!batch_depends_on(dep, batch));
+	debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
 
 	struct fd_batch *other = NULL;
 	fd_batch_reference_locked(&other, dep);
@@ -409,10 +379,10 @@ static void
 flush_write_batch(struct fd_resource *rsc)
 {
 	struct fd_batch *b = NULL;
-	fd_batch_reference(&b, rsc->write_batch);
+	fd_batch_reference_locked(&b, rsc->write_batch);
 
 	mtx_unlock(&b->ctx->screen->lock);
-	fd_batch_flush(b, true, false);
+	fd_batch_flush(b);
 	mtx_lock(&b->ctx->screen->lock);
 
 	fd_bc_invalidate_batch(b, false);
@@ -469,8 +439,10 @@ fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool wri
 			flush_write_batch(rsc);
 	}
 
-	if (rsc->batch_mask & (1 << batch->idx))
+	if (rsc->batch_mask & (1 << batch->idx)) {
+		debug_assert(_mesa_set_search(batch->resources, rsc));
 		return;
+	}
 
 	debug_assert(!_mesa_set_search(batch->resources, rsc));
 
@@ -484,7 +456,7 @@ fd_batch_check_size(struct fd_batch *batch)
 	debug_assert(!batch->flushed);
 
 	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
-		fd_batch_flush(batch, true, false);
+		fd_batch_flush(batch);
 		return;
 	}
 
@@ -493,7 +465,7 @@ fd_batch_check_size(struct fd_batch *batch)
 	struct fd_ringbuffer *ring = batch->draw;
 
 	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
-		fd_batch_flush(batch, true, false);
+		fd_batch_flush(batch);
}
 
 /* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
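
Note on the dependency-tracking hunks above: batch_depends_on() is replaced by recursive_dependents_mask(), so the loop check in fd_batch_add_dep() becomes a single bit test against a recursively accumulated bitmask. The following standalone sketch (hypothetical, simplified types; not part of the driver) illustrates the same idea, assuming a small fixed-size table of nodes indexed by idx in place of the real batch cache:

/* Standalone sketch (hypothetical, simplified types -- not driver code).
 * It mirrors recursive_dependents_mask()/fd_batch_add_dep() from the diff
 * above: each node keeps a bitmask of the nodes it depends on, and the
 * recursive helper ORs in indirect dependencies so that a cycle check is
 * a single bit test.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NODES 32

struct node {
	unsigned idx;              /* slot in a hypothetical cache, 0..31 */
	uint32_t dependents_mask;  /* bit i set: this node depends on nodes[i]
	                            * (naming follows the driver) */
};

static struct node *nodes[MAX_NODES];

/* direct plus transitive dependencies, like recursive_dependents_mask() */
static uint32_t
recursive_mask(const struct node *n)
{
	uint32_t mask = n->dependents_mask;

	for (unsigned i = 0; i < MAX_NODES; i++)
		if ((n->dependents_mask & (1u << i)) && nodes[i])
			mask |= recursive_mask(nodes[i]);

	return mask;
}

/* record "batch depends on dep", refusing to create a loop,
 * analogous to fd_batch_add_dep() */
static void
add_dep(struct node *batch, struct node *dep)
{
	if (batch->dependents_mask & (1u << dep->idx))
		return;

	/* a loop should not be possible */
	assert(!((1u << batch->idx) & recursive_mask(dep)));

	batch->dependents_mask |= 1u << dep->idx;
}

int
main(void)
{
	struct node a = { .idx = 0 }, b = { .idx = 1 }, c = { .idx = 2 };

	nodes[0] = &a;
	nodes[1] = &b;
	nodes[2] = &c;

	add_dep(&a, &b);   /* a -> b */
	add_dep(&b, &c);   /* b -> c */

	/* covers the direct dep (b, bit 1) and the indirect one (c, bit 2) */
	printf("0x%x\n", recursive_mask(&a));   /* prints 0x6 */

	return 0;
}

Running the sketch prints 0x6 for node a, i.e. a mask covering both its direct dependency b and the transitive dependency c, which is the property the debug_assert() in fd_batch_add_dep() relies on to reject loops.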