};
struct fd_program_stateobj {
- void *vp, *fp;
-
- /* rest only used by fd2.. split out: */
- uint8_t num_exports;
- /* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
- * for TGSI_SEMANTIC_GENERIC. Special vs exports (position and point-
- * size) are not included in this
- */
- uint8_t export_linkage[63];
+ void *vs, *hs, *ds, *gs, *fs;
};
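/* A minimal sketch (not part of this patch) of how the per-stage slots
 * are meant to be used: each gallium bind hook becomes a simple store
 * plus a dirty flag.  Assumes ctx->prog is the fd_program_stateobj
 * instance; the hook name below is illustrative.
 */
static void
fd_fs_state_bind(struct pipe_context *pctx, void *hwcso)
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->prog.fs = hwcso;
   ctx->dirty |= FD_DIRTY_PROG;
}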
struct fd_constbuf_stateobj {
struct fd_shaderbuf_stateobj {
struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
uint32_t enabled_mask;
+ uint32_t writable_mask;
};
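/* A sketch (not part of this patch) of how writable_mask might be kept
 * in sync in the ->set_shader_buffers() hook, which receives a
 * writable_bitmask from gallium.  Tracking which SSBOs are writable
 * lets the backend limit write-related cache management to the buffers
 * that need it.  The function body is illustrative; only the struct
 * fields above are from this change.
 */
static void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
      unsigned start, unsigned count,
      const struct pipe_shader_buffer *buffers,
      unsigned writable_bitmask)
{
   struct fd_shaderbuf_stateobj *so = &fd_context(pctx)->shaderbuf[shader];
   unsigned mask = BITFIELD_MASK(count) << start;

   /* clear then set the affected range in both masks: */
   so->enabled_mask &= ~mask;
   so->writable_mask &= ~mask;
   so->writable_mask |= (writable_bitmask << start) & mask;

   for (unsigned i = 0; i < count; i++) {
      unsigned n = i + start;
      struct pipe_shader_buffer *buf = &so->sb[n];

      if (buffers && buffers[i].buffer) {
         pipe_resource_reference(&buf->buffer, buffers[i].buffer);
         buf->buffer_offset = buffers[i].buffer_offset;
         buf->buffer_size = buffers[i].buffer_size;
         so->enabled_mask |= BIT(n);
      } else {
         pipe_resource_reference(&buf->buffer, NULL);
      }
   }
}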
struct fd_shaderimg_stateobj {
struct fd_streamout_stateobj {
struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
+ /* Bitmask of streams that should be reset. */
+ unsigned reset;
+
unsigned num_targets;
/* Track offset from vtxcnt for streamout data. This counter
* is just incremented by # of vertices on each draw until
FD_DIRTY_VIEWPORT = BIT(8),
FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10),
+ FD_DIRTY_MIN_SAMPLES = BIT(11),
FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_PROG = BIT(16),
FD_DIRTY_CONST = BIT(17),
FD_DIRTY_TEX = BIT(18),
+ FD_DIRTY_IMAGE = BIT(19),
+ FD_DIRTY_SSBO = BIT(20),
/* only used by a2xx.. possibly can be removed.. */
- FD_DIRTY_TEXSTATE = BIT(19),
+ FD_DIRTY_TEXSTATE = BIT(21),
};
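/* Sketch of the usage pattern for the new bits (illustrative, not part
 * of this patch): the state-setter records the value and marks the bit,
 * and the emit path tests it.  set_min_samples() is the gallium hook
 * that feeds the new min_samples field / FD_DIRTY_MIN_SAMPLES bit.
 */
static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->min_samples = min_samples;
   ctx->dirty |= FD_DIRTY_MIN_SAMPLES;
}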
/* per shader-stage dirty state: */
struct fd_context {
struct pipe_context base;
+ struct list_head node; /* node in screen->context_list */
+
+ /* We currently need to serialize emitting GMEM batches, because of
+ * VSC state access in the context.
+ *
+ * In practice this lock should not be contended, since pipe_context
+ * use should be single threaded. But it is needed to protect the
+ * case, with batch reordering, where a ctxB batch triggers flushing
+ * a ctxA batch.
+ */
+ mtx_t gmem_lock;
+
struct fd_device *dev;
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct util_queue flush_queue;
-
struct blitter_context *blitter;
void *clear_rs_state;
struct primconvert_context *primconvert;
struct list_head acc_active_queries;
/*@}*/
+ /* Whether we need to walk the acc_active_queries on the next
+ * fd_set_stage(), to update active queries (even if the stage
+ * doesn't change).
+ */
+ bool update_active_queries;
+
+ /* Current state of pctx->set_active_query_state() (i.e. "should drawing
+ * be counted against non-perfcounter queries")
+ */
+ bool active_queries;
+
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
* slight differences between generations:
uint64_t draw_calls;
uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
uint64_t staging_uploads, shadow_uploads;
- uint64_t vs_regs, fs_regs;
+ uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
} stats;
/* Current batch.. the rule here is that you can deref ctx->batch
*/
struct pipe_fence_handle *last_fence;
+ /* Track last known reset status globally and per-context to
+ * determine if more resets occurred since then. If global reset
+ * count increases, it means some other context crashed. If
+ * per-context reset count increases, it means we crashed the
+ * gpu.
+ */
+ uint32_t context_reset_count, global_reset_count;
+
/* Are we in the process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/
* contents. Main point is to eliminate blits from fd_try_shadow_resource().
* For example, in case of texture upload + gen-mipmaps.
*/
- bool in_blit : 1;
+ bool in_discard_blit : 1;
struct pipe_scissor_state scissor;
*/
struct pipe_scissor_state disabled_scissor;
- /* Current gmem/tiling configuration.. gets updated on render_tiles()
- * if out of date with current maximal-scissor/cpp:
- *
- * (NOTE: this is kind of related to the batch, but moving it there
- * means we'd always have to recalc tiles every batch)
- */
- struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe vsc_pipe[32];
- struct fd_tile tile[512];
+ /* Per vsc pipe bo's (a2xx-a5xx): */
+ struct fd_bo *vsc_pipe_bo[32];
/* which state objects need to be re-emit'd: */
enum fd_dirty_3d_state dirty;
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
+ unsigned min_samples;
/* local context fb state, for when ctx->batch is null: */
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
+ struct pipe_scissor_state viewport_scissor;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
/* GMEM/tile handling fxns (see the tile-loop sketch after this struct): */
void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
/* optional, for GMEM bypass: */
/* compute: */
void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
- /* constant emit: (note currently not used/needed for a2xx) */
- void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
- uint32_t regid, uint32_t offset, uint32_t sizedwords,
- const uint32_t *dwords, struct pipe_resource *prsc);
- /* emit bo addresses as constant: */
- void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
- uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
-
- /* indirect-branch emit: */
- void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);
-
/* query: */
- struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
+ struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
/* blitter: */
- void (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
+ bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
+
+ /* handling for barriers: */
+ void (*framebuffer_barrier)(struct fd_context *ctx);
+
+ /* logger: */
+ void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
+ uint64_t (*ts_to_ns)(uint64_t ts);
- /* simple gpu "memcpy": */
- void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
- unsigned dst_off, struct pipe_resource *src, unsigned src_off,
- unsigned sizedwords);
+ struct list_head log_chunks; /* list of flushed log chunks in FIFO order */
+ unsigned frame_nr; /* frame counter (for fd_log) */
+ FILE *log_out;
/*
* Common pre-cooked VBO state (used for a3xx and later):
* - solid_vbuf / 12 / R32G32B32_FLOAT
*/
struct fd_vertex_state blit_vbuf_state;
+
+ /*
+ * Info about state of previous draw, for state that comes from
+ * pipe_draw_info (i.e. not part of a CSO). This allows us to
+ * skip some register emits when the state doesn't change from
+ * draw to draw.
+ */
+ struct {
+ bool dirty; /* last draw state unknown */
+ bool primitive_restart;
+ uint32_t index_start;
+ uint32_t instance_start;
+ uint32_t restart_index;
+ } last;
};
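/* A sketch of how the per-tile vfuncs above compose during a GMEM
 * render pass (illustrative; fd_gmem_render_tiles() is the real
 * consumer, and this assumes the tile array now lives in the
 * fd_gmem_stateobj rather than in fd_context):
 */
static void
render_tiles_sketch(struct fd_batch *batch, const struct fd_gmem_stateobj *gmem)
{
   struct fd_context *ctx = batch->ctx;

   ctx->emit_tile_init(batch);

   for (unsigned i = 0; i < gmem->nbins_x * gmem->nbins_y; i++) {
      const struct fd_tile *tile = &gmem->tile[i];

      ctx->emit_tile_prep(batch, tile);       /* per-tile setup */
      ctx->emit_tile_mem2gmem(batch, tile);   /* restore, if needed */
      ctx->emit_tile_renderprep(batch, tile); /* setup to rasterize */
      if (ctx->emit_tile)                     /* newer gens: per-tile draw IB */
         ctx->emit_tile(batch, tile);
      ctx->emit_tile_gmem2mem(batch, tile);   /* resolve to memory */
   }

   if (ctx->emit_tile_fini)
      ctx->emit_tile_fini(batch);
}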
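/* And a sketch of the draw-to-draw skip that ctx->last enables
 * (illustrative; emit_vfd_instance_start() is a hypothetical stand-in
 * for the real register emit):
 */
static void
emit_instance_start_sketch(struct fd_context *ctx, struct fd_ringbuffer *ring,
      const struct pipe_draw_info *info)
{
   /* only re-emit when unknown (last.dirty) or actually changed: */
   if (ctx->last.dirty ||
         (ctx->last.instance_start != info->start_instance)) {
      emit_vfd_instance_start(ring, info->start_instance);
      ctx->last.instance_start = info->start_instance;
   }
   ctx->last.dirty = false;
}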
static inline struct fd_context *
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
+ ctx->last.dirty = true;
ctx->dirty = ~0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
ctx->dirty_shader[i] = ~0;
{
struct fd_context *ctx = batch->ctx;
- /* special case: internal blits (like mipmap level generation)
- * go through normal draw path (via util_blitter_blit()).. but
- * we need to ignore the FD_STAGE_DRAW which will be set, so we
- * don't enable queries which should be paused during internal
- * blits:
- */
- if ((batch->stage == FD_STAGE_BLIT) &&
- (stage != FD_STAGE_NULL))
- return;
-
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);