diff --git a/src/gallium/drivers/freedreno/freedreno_batch.h b/src/gallium/drivers/freedreno/freedreno_batch.h
index a5fa6ce5a22..5061186f1b5 100644
--- a/src/gallium/drivers/freedreno/freedreno_batch.h
+++ b/src/gallium/drivers/freedreno/freedreno_batch.h
@@ -55,7 +55,7 @@ enum fd_render_stage {
 	FD_STAGE_ALL      = 0xff,
 };
 
-#define MAX_HW_SAMPLE_PROVIDERS 5
+#define MAX_HW_SAMPLE_PROVIDERS 7
 struct fd_hw_sample_provider;
 struct fd_hw_sample;
 
@@ -84,6 +84,10 @@ struct fd_batch {
 	 * The 'cleared' bits will be set for buffers which are *entirely*
 	 * cleared, and 'partial_cleared' bits will be set if you must
 	 * check cleared_scissor.
+	 *
+	 * The 'invalidated' bits are set for cleared buffers, and buffers
+	 * where the contents are undefined, ie. what we don't need to restore
+	 * to gmem.
 	 */
 	enum {
 		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
@@ -91,11 +95,15 @@ struct fd_batch {
 		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
 		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
 		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
-	} cleared, partial_cleared, restore, resolve;
+	} invalidated, cleared, fast_cleared, restore, resolve;
 
+	/* is this a non-draw batch (ie compute/blit which has no pfb state)? */
+	bool nondraw : 1;
 	bool needs_flush : 1;
+	bool flushed : 1;
 	bool blit : 1;
 	bool back_blit : 1;      /* only blit so far is resource shadowing back-blit */
+	bool tessellation : 1;      /* tessellation used in batch */
 
 	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
 	 * to update via RMW:
@@ -113,11 +121,12 @@ struct fd_batch {
 		FD_GMEM_DEPTH_ENABLED        = 0x02,
 		FD_GMEM_STENCIL_ENABLED      = 0x04,
 
-		FD_GMEM_MSAA_ENABLED         = 0x08,
 		FD_GMEM_BLEND_ENABLED        = 0x10,
 		FD_GMEM_LOGICOP_ENABLED      = 0x20,
+		FD_GMEM_FB_READ              = 0x40,
 	} gmem_reason;
 	unsigned num_draws;   /* number of draws in current batch */
+	unsigned num_vertices;   /* number of vertices in current batch */
 
 	/* Track the maximal bounds of the scissor of all the draws within a
 	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
@@ -125,19 +134,14 @@ struct fd_batch {
 	 */
 	struct pipe_scissor_state max_scissor;
 
-	/* Track the cleared scissor for color/depth/stencil, so we know
-	 * which, if any, tiles need to be restored (mem2gmem).  Only valid
-	 * if the corresponding bit in ctx->cleared is set.
-	 */
-	struct {
-		struct pipe_scissor_state color, depth, stencil;
-	} cleared_scissor;
-
 	/* Keep track of DRAW initiators that need to be patched up depending
 	 * on whether we using binning or not:
 	 */
 	struct util_dynarray draw_patches;
 
+	/* texture state that needs patching for fb_read: */
+	struct util_dynarray fb_read_patches;
+
 	/* Keep track of writes to RB_RENDER_CONTROL which need to be patched
 	 * once we know whether or not to use GMEM, and GMEM tile pitch.
 	 *
@@ -146,8 +150,22 @@ struct fd_batch {
 	 */
 	struct util_dynarray rbrc_patches;
 
+	/* Keep track of GMEM related values that need to be patched up once we
+	 * know the gmem layout:
+	 */
+	struct util_dynarray gmem_patches;
+
+	/* Keep track of pointer to start of MEM exports for a20x binning shaders
+	 *
+	 * this is so the end of the shader can be cut off at the right point
+	 * depending on the GMEM configuration
+	 */
+	struct util_dynarray shader_patches;
+
 	struct pipe_framebuffer_state framebuffer;
 
+	struct fd_submit *submit;
+
 	/** draw pass cmdstream: */
 	struct fd_ringbuffer *draw;
 	/** binning pass cmdstream: */
@@ -157,6 +175,12 @@ struct fd_batch {
 
 	// TODO maybe more generically split out clear and clear_binning rings?
 	struct fd_ringbuffer *lrz_clear;
+	struct fd_ringbuffer *tile_setup;
+	struct fd_ringbuffer *tile_fini;
+
+	union pipe_color_union clear_color[MAX_RENDER_TARGETS];
+	double clear_depth;
+	unsigned clear_stencil;
 
 	/**
 	 * hw query related state:
@@ -200,13 +224,26 @@ struct fd_batch {
 
 	/** set of dependent batches.. holds refs to dependent batches: */
 	uint32_t dependents_mask;
+
+	/* Buffer for tessellation engine input
+	 */
+	struct fd_bo *tessfactor_bo;
+	uint32_t tessfactor_size;
+
+	/* Buffer for passing parameters between TCS and TES
+	 */
+	struct fd_bo *tessparam_bo;
+	uint32_t tessparam_size;
+
+	struct fd_ringbuffer *tess_addrs_constobj;
 };
 
-struct fd_batch * fd_batch_create(struct fd_context *ctx);
+struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
 
 void fd_batch_reset(struct fd_batch *batch);
 void fd_batch_sync(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
+void fd_batch_flush(struct fd_batch *batch, bool sync);
+void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
 void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
 void fd_batch_check_size(struct fd_batch *batch);
 
@@ -223,18 +260,12 @@ void __fd_batch_destroy(struct fd_batch *batch);
  *
  * WARNING the _locked() version can briefly drop the lock.  Without
  * recursive mutexes, I'm not sure there is much else we can do (since
  * __fd_batch_destroy() needs to unref resources)
+ *
+ * WARNING you must acquire the screen->lock and use the _locked()
+ * version in case that the batch being ref'd can disappear under
+ * you.
  */
-static inline void
-fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
-{
-	struct fd_batch *old_batch = *ptr;
-	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
-			(debug_reference_descriptor)__fd_batch_describe))
-		__fd_batch_destroy(old_batch);
-	*ptr = batch;
-}
-
 /* fwd-decl prototypes to untangle header dependency :-/ */
 static inline void fd_context_assert_locked(struct fd_context *ctx);
 static inline void fd_context_lock(struct fd_context *ctx);
@@ -245,21 +276,32 @@ fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
 {
 	struct fd_batch *old_batch = *ptr;
 
+	/* only need lock if a reference is dropped: */
 	if (old_batch)
 		fd_context_assert_locked(old_batch->ctx);
-	else if (batch)
-		fd_context_assert_locked(batch->ctx);
 
 	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
-			(debug_reference_descriptor)__fd_batch_describe)) {
-		struct fd_context *ctx = old_batch->ctx;
-		fd_context_unlock(ctx);
+			(debug_reference_descriptor)__fd_batch_describe))
 		__fd_batch_destroy(old_batch);
-		fd_context_lock(ctx);
-	}
+
 	*ptr = batch;
 }
 
+static inline void
+fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
+{
+	struct fd_batch *old_batch = *ptr;
+	struct fd_context *ctx = old_batch ? old_batch->ctx : NULL;
+
+	if (ctx)
+		fd_context_lock(ctx);
+
+	fd_batch_reference_locked(ptr, batch);
+
+	if (ctx)
+		fd_context_unlock(ctx);
+}
+
 #include "freedreno_context.h"
 
 static inline void
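Note on the new util_dynarray fields: fb_read_patches, gmem_patches and shader_patches follow the same record-and-patch idiom as the existing draw_patches and rbrc_patches. While building the command stream, the driver records dwords whose final value depends on decisions made only at flush time (binning vs. bypass, GMEM layout), then rewrites them in one pass. A rough sketch of that idiom, using a hypothetical patch record (the driver's real one is the {cs, val} pair struct fd_cs_patch in freedreno_util.h):

#include "util/u_dynarray.h"

/* hypothetical stand-in for the driver's patch record: a pointer into
 * the cmdstream plus the value that needs fixing up later:
 */
struct example_patch {
	uint32_t *cs;
	uint32_t val;
};

/* while recording cmdstream: remember a dword we can't finalize yet */
static inline void
example_add_patch(struct util_dynarray *patches, uint32_t *cs, uint32_t val)
{
	struct example_patch p = { cs, val };
	util_dynarray_append(patches, struct example_patch, p);
}

/* at flush time, once the gmem layout (etc) is known, rewrite them all */
static inline void
example_apply_patches(struct util_dynarray *patches, uint32_t final_bits)
{
	util_dynarray_foreach(patches, struct example_patch, p)
		*p->cs = p->val | final_bits;
	util_dynarray_clear(patches);
}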
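Note on the reference-counting change at the bottom: fd_batch_reference() is now a wrapper that takes the context lock itself (and only needs it when a reference may be dropped), while fd_batch_reference_locked() is for callers that already hold screen->lock -- which, per the new WARNING, is mandatory whenever the batch pointer being read can be clobbered by another thread. A minimal sketch of the intended calling pattern (helper name invented; assumes the context's current batch lives in ctx->batch, as in freedreno_context.h):

/* illustrative only: safely snapshot the context's current batch from
 * code that does not yet hold any locks.  ctx->batch can be swapped out
 * concurrently (eg. by a flush on another thread), so the pointer must
 * be read and ref'd under the lock, using the _locked() variant:
 */
static inline struct fd_batch *
example_grab_current_batch(struct fd_context *ctx)
{
	struct fd_batch *batch = NULL;

	fd_context_lock(ctx);
	fd_batch_reference_locked(&batch, ctx->batch);
	fd_context_unlock(ctx);

	/* caller now owns a reference; it is dropped later with
	 * fd_batch_reference(&batch, NULL), which takes the lock itself.
	 */
	return batch;
}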