struct fd_shaderbuf_stateobj {
struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
uint32_t enabled_mask;
+ uint32_t writable_mask;
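+ /* A sketch of how writable_mask might be maintained from the gallium
+  * set_shader_buffers() hook; the writable_bitmask parameter is part of
+  * the pipe_context interface, but the loop body here is an assumption:
+  *
+  *    for (unsigned i = 0; i < count; i++) {
+  *       unsigned n = i + start;
+  *       so->enabled_mask |= BIT(n);
+  *       if (writable_bitmask & BIT(i))
+  *          so->writable_mask |= BIT(n);
+  *       else
+  *          so->writable_mask &= ~BIT(n);
+  *    }
+  */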
};
struct fd_shaderimg_stateobj {
struct fd_context {
struct pipe_context base;
+ /* We currently need to serialize emitting GMEM batches, because of
+ * VSC state access in the context.
+ *
+ * In practice this lock should not be contended, since pipe_context
+ * use should be single threaded.  But it is needed to protect the
+ * case, with batch reordering, where a ctxB batch triggers flushing
+ * a ctxA batch.
+ */
+ mtx_t gmem_lock;
+
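+ /* A minimal sketch of the intended locking pattern, assuming the GMEM
+  * emit path looks roughly like this (illustrative, not the verbatim
+  * call site):
+  *
+  *    mtx_lock(&batch->ctx->gmem_lock);
+  *    render_tiles(batch);            // per-tile IBs, touches VSC state
+  *    mtx_unlock(&batch->ctx->gmem_lock);
+  */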
struct fd_device *dev;
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct util_queue flush_queue;
-
struct blitter_context *blitter;
void *clear_rs_state;
struct primconvert_context *primconvert;
struct list_head acc_active_queries;
/*@}*/
+ /* Whether we need to walk the acc_active_queries list on the next
+ * fd_set_stage() to update active queries (even if the stage doesn't
+ * change).
+ */
+ bool update_active_queries;
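+ /* Sketch of how the flag might be consumed by fd_set_stage(); the
+  * body here is an assumption for illustration:
+  *
+  *    if (ctx->update_active_queries || batch->stage != stage) {
+  *       // walk ctx->acc_active_queries, pausing/resuming each
+  *       // query as appropriate for the new stage
+  *    }
+  *    ctx->update_active_queries = false;
+  */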
+
/* table with PIPE_MAX_PRIM entries mapping PIPE_PRIM_x to
 * DI_PT_x value to use for draw initiator.  There are some
 * slight differences between generations.
 */
struct pipe_scissor_state disabled_scissor;
- /* Current gmem/tiling configuration.. gets updated on render_tiles()
- * if out of date with current maximal-scissor/cpp:
- *
- * (NOTE: this is kind of related to the batch, but moving it there
- * means we'd always have to recalc tiles ever batch)
- */
- struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe vsc_pipe[32];
- struct fd_tile tile[512];
+ /* Per-VSC-pipe BOs (a2xx-a5xx): */
+ struct fd_bo *vsc_pipe_bo[32];
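+ /* Sketch of lazy per-pipe allocation (the size and flags shown are
+  * illustrative assumptions; fd_bo_new() is the libdrm_freedreno
+  * allocator):
+  *
+  *    if (!ctx->vsc_pipe_bo[i])
+  *       ctx->vsc_pipe_bo[i] = fd_bo_new(ctx->dev, 0x20000,
+  *                                       DRM_FREEDRENO_GEM_TYPE_KMEM);
+  */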
/* which state objects need to be re-emit'd: */
enum fd_dirty_3d_state dirty;
/* GMEM/tile handling fxns: */
void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
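+ /* The GMEM path drives these hooks roughly as follows (a simplified
+  * sketch; the loop structure and tile lookup are assumptions):
+  *
+  *    ctx->emit_tile_init(batch);
+  *    for (unsigned i = 0; i < num_tiles; i++) {
+  *       const struct fd_tile *tile = &tiles[i];
+  *       ctx->emit_tile_prep(batch, tile);
+  *       ctx->emit_tile_mem2gmem(batch, tile);   // restore
+  *       ctx->emit_tile_renderprep(batch, tile);
+  *       ctx->emit_tile(batch, tile);            // per-tile draws
+  *       ctx->emit_tile_gmem2mem(batch, tile);   // resolve
+  *    }
+  *    if (ctx->emit_tile_fini)
+  *       ctx->emit_tile_fini(batch);
+  */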
/* optional, for GMEM bypass: */
* - solid_vbuf / 12 / R32G32B32_FLOAT
*/
struct fd_vertex_state blit_vbuf_state;
+
+ /*
+ * Info about the state of the previous draw, for state that comes
+ * from pipe_draw_info (i.e. not part of a CSO).  This allows us to
+ * skip some register emits when the state doesn't change from
+ * draw to draw.
+ */
+ struct {
+ bool dirty; /* last draw state unknown */
+ bool primitive_restart;
+ uint32_t index_start;
+ uint32_t instance_start;
+ uint32_t restart_index;
+ } last;
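+ /* Example of the intended draw-time check (a sketch; the emit helper
+  * named here is hypothetical):
+  *
+  *    if (ctx->last.dirty ||
+  *        ctx->last.index_start != info->start ||
+  *        ctx->last.instance_start != info->start_instance) {
+  *       emit_draw_offsets(ring, info);   // hypothetical helper
+  *       ctx->last.index_start = info->start;
+  *       ctx->last.instance_start = info->start_instance;
+  *    }
+  *    ctx->last.dirty = false;
+  */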
};
static inline struct fd_context *
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
+ ctx->last.dirty = true;   /* also invalidate last-draw state tracking */
ctx->dirty = ~0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
ctx->dirty_shader[i] = ~0;
* don't enable queries which should be paused during internal
* blits:
*/
- if ((batch->stage == FD_STAGE_BLIT) &&
- (stage != FD_STAGE_NULL))
- return;
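+ /* Rather than skipping query_set_stage() entirely during internal
+  * blits (the old early return), remap the stage to FD_STAGE_BLIT so
+  * the query provider still sees the transition and can pause the
+  * appropriate queries:
+  */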
+ if (batch->stage == FD_STAGE_BLIT && stage != FD_STAGE_NULL)
+ stage = FD_STAGE_BLIT;
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);