};
struct fd_program_stateobj {
- void *vp, *fp;
+ void *vs, *hs, *ds, *gs, *fs;
};
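With all five shader stages tracked, a per-stage bind hook reduces to a slot store plus dirty-flagging. A minimal sketch for the vertex stage, assuming the fd_context()/dirty-bit plumbing visible elsewhere in this header (the actual driver entry points may differ):

    static void
    fd_vs_state_bind(struct pipe_context *pctx, void *hwcso)
    {
        struct fd_context *ctx = fd_context(pctx);

        ctx->prog.vs = hwcso;   /* store the CSO in its stage slot */

        /* flag program state dirty so it is re-emitted on the next draw: */
        ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
        ctx->dirty |= FD_DIRTY_PROG;
    }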
struct fd_constbuf_stateobj {
struct fd_shaderbuf_stateobj {
struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
uint32_t enabled_mask;
+ uint32_t writable_mask;
};
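The new writable_mask lets emit code distinguish read-only SSBO bindings from writable ones (e.g. when deciding whether write hazards apply). A sketch of how the gallium set_shader_buffers hook might maintain both masks, assuming the usual Mesa BITFIELD_RANGE/BIT utility macros:

    static void
    fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
            unsigned start, unsigned count,
            const struct pipe_shader_buffer *buffers, unsigned writable_bitmask)
    {
        struct fd_context *ctx = fd_context(pctx);
        struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
        unsigned mask = BITFIELD_RANGE(start, count);

        /* clear masks for the updated range, then re-set from the new state: */
        so->enabled_mask &= ~mask;
        so->writable_mask &= ~mask;
        so->writable_mask |= writable_bitmask << start;

        for (unsigned i = 0; i < count; i++) {
            unsigned n = i + start;
            struct pipe_shader_buffer *buf = &so->sb[n];

            if (buffers && buffers[i].buffer) {
                pipe_resource_reference(&buf->buffer, buffers[i].buffer);
                buf->buffer_offset = buffers[i].buffer_offset;
                buf->buffer_size = buffers[i].buffer_size;
                so->enabled_mask |= BIT(n);
            } else {
                pipe_resource_reference(&buf->buffer, NULL);
            }
        }

        ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
    }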
struct fd_shaderimg_stateobj {
struct fd_streamout_stateobj {
struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
+ /* Bitmask of streams that should be reset: */
+ unsigned reset;
+
unsigned num_targets;
/* Track offset from vtxcnt for streamout data. This counter
* is just incremented by # of vertices on each draw until
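A sketch of how the streamout bind path might populate the reset bitmask, following the gallium convention that an offset of (unsigned)-1 means "append at the current position":

    /* Sketch (in the set_streamout_targets hook): a stream gets a reset
     * when it is (re)bound with an explicit offset rather than appended to:
     */
    for (unsigned i = 0; i < num_targets; i++) {
        bool changed = targets[i] != so->targets[i];
        bool append = (offsets[i] == (unsigned)-1);

        if (!changed && append)
            continue;

        so->offsets[i] = 0;
        if (!append)
            so->reset |= (1 << i);

        pipe_so_target_reference(&so->targets[i], targets[i]);
    }
    so->num_targets = num_targets;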
struct fd_context {
struct pipe_context base;
+ /* We currently need to serialize emitting GMEM batches, because of
+ * VSC state access in the context.
+ *
+ * In practice this lock should not be contended, since pipe_context
+ * use should be single threaded. But it is needed to protect the
+ * case where, with batch reordering, a ctxB batch triggers
+ * flushing a ctxA batch.
+ */
+ mtx_t gmem_lock;
+
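A sketch of how the lock would bracket GMEM cmdstream emission; render_tiles stands in for the tiling path that consumes the per-context VSC state:

    /* Sketch: serialize access to per-context VSC state while emitting
     * a GMEM (tiled) rendering cmdstream:
     */
    mtx_lock(&ctx->gmem_lock);
    render_tiles(batch);   /* walks tiles, emits IBs using VSC state */
    mtx_unlock(&ctx->gmem_lock);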
struct fd_device *dev;
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct util_queue flush_queue;
-
struct blitter_context *blitter;
void *clear_rs_state;
struct primconvert_context *primconvert;
struct list_head acc_active_queries;
/*@}*/
+ /* Whether we need to walk the acc_active_queries list on the next
+ * fd_set_stage() to update active queries (even if the stage doesn't
+ * change).
+ */
+ bool update_active_queries;
+
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
 * slight differences between generations:
uint64_t draw_calls;
uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
uint64_t staging_uploads, shadow_uploads;
- uint64_t vs_regs, fs_regs;
+ uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
} stats;
/* Current batch.. the rule here is that you can deref ctx->batch
*/
struct pipe_scissor_state disabled_scissor;
- /* Current gmem/tiling configuration.. gets updated on render_tiles()
- * if out of date with current maximal-scissor/cpp:
- *
- * (NOTE: this is kind of related to the batch, but moving it there
- * means we'd always have to recalc tiles ever batch)
- */
- struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe vsc_pipe[32];
- struct fd_tile tile[512];
+ /* Per-vsc-pipe BOs (a2xx-a5xx): */
+ struct fd_bo *vsc_pipe_bo[32];
/* which state objects need to be re-emit'd: */
enum fd_dirty_3d_state dirty;
/* GMEM/tile handling fxns: */
void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
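Taken together, these hooks are driven by a generation-independent tiling loop. A minimal sketch of that loop; field names like num_tiles are approximated, and the real bookkeeping lives in the gmem code:

    static void
    render_tiles(struct fd_batch *batch, const struct fd_gmem_stateobj *gmem)
    {
        struct fd_context *ctx = batch->ctx;

        ctx->emit_tile_init(batch);

        for (unsigned i = 0; i < gmem->num_tiles; i++) {   /* num_tiles approximated */
            const struct fd_tile *tile = &gmem->tile[i];

            ctx->emit_tile_prep(batch, tile);

            if (batch->restore)
                ctx->emit_tile_mem2gmem(batch, tile);  /* restore tile from sysmem */

            ctx->emit_tile_renderprep(batch, tile);

            ctx->emit_tile(batch, tile);               /* replay draw cmdstream */

            ctx->emit_tile_gmem2mem(batch, tile);      /* resolve tile to sysmem */
        }

        if (ctx->emit_tile_fini)
            ctx->emit_tile_fini(batch);
    }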
/* optional, for GMEM bypass: */
/* compute: */
void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
- /* constant emit: (note currently not used/needed for a2xx) */
- void (*emit_const)(struct fd_ringbuffer *ring, gl_shader_stage type,
- uint32_t regid, uint32_t offset, uint32_t sizedwords,
- const uint32_t *dwords, struct pipe_resource *prsc);
- /* emit bo addresses as constant: */
- void (*emit_const_bo)(struct fd_ringbuffer *ring, gl_shader_stage type, boolean write,
- uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
-
- /* indirect-branch emit: */
- void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);
-
/* query: */
- struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
+ struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
/* blitter: */
bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
- /* simple gpu "memcpy": */
- void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
- unsigned dst_off, struct pipe_resource *src, unsigned src_off,
- unsigned sizedwords);
+ /* handling for barriers: */
+ void (*framebuffer_barrier)(struct fd_context *ctx);
/*
* Common pre-cooked VBO state (used for a3xx and later):
* - solid_vbuf / 12 / R32G32B32_FLOAT
*/
struct fd_vertex_state blit_vbuf_state;
+
+ /*
+ * Info about state of previous draw, for state that comes from
+ * pipe_draw_info (ie. not part of a CSO). This allows us to
+ * skip some register emit when the state doesn't change from
+ * draw-to-draw
+ */
+ struct {
+ bool dirty; /* last draw state unknown */
+ bool primitive_restart;
+ uint32_t index_start;
+ uint32_t instance_start;
+ uint32_t restart_index;
+ } last;
};
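A sketch of how a draw path might consume the last-draw block to skip redundant register writes; emit_draw_offsets() is a hypothetical helper standing in for the actual register emit:

    /* Sketch: skip re-emitting index/instance offsets when they are
     * unchanged since the previous draw (emit_draw_offsets hypothetical):
     */
    if (ctx->last.dirty ||
        (ctx->last.index_start != info->start) ||
        (ctx->last.instance_start != info->start_instance)) {
        emit_draw_offsets(ring, info->start, info->start_instance);
        ctx->last.index_start = info->start;
        ctx->last.instance_start = info->start_instance;
    }
    ctx->last.dirty = false;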
static inline struct fd_context *
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
+ ctx->last.dirty = true;
ctx->dirty = ~0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
ctx->dirty_shader[i] = ~0;
* don't enable queries which should be paused during internal
* blits:
*/
- if ((batch->stage == FD_STAGE_BLIT) &&
- (stage != FD_STAGE_NULL))
- return;
+ if (batch->stage == FD_STAGE_BLIT && stage != FD_STAGE_NULL)
+ stage = FD_STAGE_BLIT;
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);
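For completeness, a sketch of how the stage tracking above might combine with update_active_queries to pause/resume accumulating queries; should_count(), pause_query() and resume_query() are hypothetical stand-ins for the per-generation query hooks:

    /* Sketch: pause/resume accumulating queries when the stage changes,
     * or when an update was explicitly requested:
     */
    if ((stage != batch->stage) || ctx->update_active_queries) {
        struct fd_acc_query *aq;
        LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
            if (should_count(aq, stage))
                resume_query(batch, aq);
            else
                pause_query(batch, aq);
        }
        ctx->update_active_queries = false;
    }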