};
struct fd_program_stateobj {
- void *vs, *fs;
+ void *vs, *hs, *ds, *gs, *fs;
};
struct fd_constbuf_stateobj {
struct fd_shaderbuf_stateobj {
struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
uint32_t enabled_mask;
+ uint32_t writable_mask;
};
struct fd_shaderimg_stateobj {
FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10),
FD_DIRTY_MIN_SAMPLES = BIT(11),
-
FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_UCP = BIT(14),
FD_DIRTY_PROG = BIT(16),
FD_DIRTY_CONST = BIT(17),
FD_DIRTY_TEX = BIT(18),
+ FD_DIRTY_IMAGE = BIT(19),
+ FD_DIRTY_SSBO = BIT(20),
/* only used by a2xx.. possibly can be removed.. */
- FD_DIRTY_TEXSTATE = BIT(19),
+ FD_DIRTY_TEXSTATE = BIT(21),
+
+ /* fine-grained state changes, for cases where state is not orthogonal
+ * from the hw perspective:
+ */
+ FD_DIRTY_RASTERIZER_DISCARD = BIT(24),
};
/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
	FD_DIRTY_SHADER_PROG = BIT(0),
	FD_DIRTY_SHADER_CONST = BIT(1),
	FD_DIRTY_SHADER_TEX = BIT(2),
	FD_DIRTY_SHADER_SSBO = BIT(3),
	FD_DIRTY_SHADER_IMAGE = BIT(4),
};
struct fd_context {
struct pipe_context base;
+ struct list_head node; /* node in screen->context_list */
+
+ /* We currently need to serialize emitting GMEM batches, because of
+ * VSC state access in the context.
+ *
+ * In practice this lock should not be contended, since pipe_context
+ * use should be single threaded.  But it is needed to protect the
+ * case, with batch reordering, where a ctxB batch triggers flushing
+ * a ctxA batch.
+ */
+ mtx_t gmem_lock;
+
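/* Editorial sketch (not part of this patch): the locking pattern this
 * implies for the gmem code, e.g. in fd_gmem_render_tiles():
 *
 *    mtx_lock(&ctx->gmem_lock);
 *    ... emit per-tile cmdstream, touching shared VSC state ...
 *    mtx_unlock(&ctx->gmem_lock);
 */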
struct fd_device *dev;
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct util_queue flush_queue;
-
struct blitter_context *blitter;
void *clear_rs_state;
struct primconvert_context *primconvert;
struct list_head acc_active_queries;
/*@}*/
+ /* Whether we need to walk acc_active_queries on the next fd_set_stage(),
+ * to update active queries (even if the stage doesn't change).
+ */
+ bool update_active_queries;
+
+ /* Current state of pctx->set_active_query_state() (i.e. "should drawing
+ * be counted against non-perfcounter queries")
+ */
+ bool active_queries;
+
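/* Editorial sketch (simplified, but using only names from this header):
 * the set_active_query_state() hook only needs to record the flag and
 * request a re-walk of acc_active_queries on the next fd_set_stage():
 *
 *    static void
 *    fd_set_active_query_state(struct pipe_context *pctx, bool enable)
 *    {
 *       struct fd_context *ctx = fd_context(pctx);
 *       ctx->active_queries = enable;
 *       ctx->update_active_queries = true;
 *    }
 */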
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
 * slight differences between generations:
uint64_t draw_calls;
uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
uint64_t staging_uploads, shadow_uploads;
- uint64_t vs_regs, fs_regs;
+ uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
} stats;
/* Current batch.. the rule here is that you can deref ctx->batch
 * in codepaths from top-level entry points (with public API or
 * driver vfunc entry points), but not in internal code.  Otherwise
 * you risk using a batch after it has been flushed.
 */
struct fd_batch *batch;

/* Ie. in blit situation where we no longer care about previous framebuffer
 * contents.  Main point is to eliminate blits from fd_try_shadow_resource().
 * For example, in case of texture upload + gen-mipmaps.
 */
- bool in_blit : 1;
+ bool in_discard_blit : 1;
+
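/* Editorial sketch (helper name hypothetical): the flag brackets an
 * internal blit that is known to overwrite the whole destination:
 *
 *    ctx->in_discard_blit = blit_discards_whole_dst(info);
 *    util_blitter_blit(ctx->blitter, info);
 *    ctx->in_discard_blit = false;
 */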
+ /* points to either scissor or disabled_scissor depending on rast state: */
+ struct pipe_scissor_state *current_scissor;
struct pipe_scissor_state scissor;
/* we don't have a disable/enable bit for scissor, so instead we keep
 * a disabled-scissor state which is emitted when enabled but the
 * actual scissor is disabled:
 */
struct pipe_scissor_state disabled_scissor;
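/* Editorial sketch (assumed): current_scissor gets re-pointed when the
 * rasterizer CSO binds, replacing the branch that previously lived in
 * fd_context_get_scissor():
 *
 *    ctx->current_scissor = (cso && cso->scissor) ?
 *          &ctx->scissor : &ctx->disabled_scissor;
 */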
- /* Current gmem/tiling configuration.. gets updated on render_tiles()
- * if out of date with current maximal-scissor/cpp:
- *
- * (NOTE: this is kind of related to the batch, but moving it there
- * means we'd always have to recalc tiles every batch)
- */
- struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe vsc_pipe[32];
- struct fd_tile tile[512];
+ /* Per vsc pipe bo's (a2xx-a5xx): */
+ struct fd_bo *vsc_pipe_bo[32];
/* which state objects need to be re-emit'd: */
enum fd_dirty_3d_state dirty;
struct pipe_debug_callback debug;
+ /* Called on rebind_resource() for any per-gen cleanup required: */
+ void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc);
+
/* GMEM/tile handling fxns: */
void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile)(struct fd_batch *batch, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
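/* Editorial sketch (simplified; tile iteration details assumed): how
 * the tiling loop drives these per-generation hooks:
 *
 *    ctx->emit_tile_init(batch);
 *    for (i = 0; i < nr_tiles; i++) {
 *       const struct fd_tile *tile = &tiles[i];
 *       ctx->emit_tile_prep(batch, tile);
 *       ctx->emit_tile_mem2gmem(batch, tile);      ... restore
 *       ctx->emit_tile_renderprep(batch, tile);
 *       ctx->emit_tile(batch, tile);               ... draw
 *       ctx->emit_tile_gmem2mem(batch, tile);      ... resolve
 *    }
 *    if (ctx->emit_tile_fini)
 *       ctx->emit_tile_fini(batch);
 */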
/* optional, for GMEM bypass: */
void (*emit_sysmem_prep)(struct fd_batch *batch);
void (*emit_sysmem_fini)(struct fd_batch *batch);
/* handling for barriers: */
void (*framebuffer_barrier)(struct fd_context *ctx);
+ /* logger: */
+ void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
+ uint64_t (*ts_to_ns)(uint64_t ts);
+
+ struct list_head log_chunks; /* list of flushed log chunks in fifo order */
+ unsigned frame_nr; /* frame counter (for fd_log) */
+ FILE *log_out;
+
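/* Editorial sketch (assumed flow): record_timestamp() writes a raw GPU
 * counter into a bo while the cmdstream is built; once the batch has
 * flushed, the fd_log code reads the sample back and converts it:
 *
 *    uint64_t ns = ctx->ts_to_ns(raw_ts);
 *    fprintf(ctx->log_out, "+%" PRIu64 " ns: %s", ns, msg);
 */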
/*
* Common pre-cooked VBO state (used for a3xx and later):
*/
/* vertex state for blit_prog:
 *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
 *    - solid_vbuf / 12 / R32G32B32_FLOAT
 */
struct fd_vertex_state blit_vbuf_state;
+
+ /*
+ * Info about state of previous draw, for state that comes from
+ * pipe_draw_info (ie. not part of a CSO). This allows us to
+ * skip some register emits when the state doesn't change from
+ * draw to draw.
+ */
+ struct {
+ bool dirty; /* last draw state unknown */
+ bool primitive_restart;
+ uint32_t index_start;
+ uint32_t instance_start;
+ uint32_t restart_index;
+ uint32_t streamout_mask;
+ } last;
};
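/* Editorial sketch (the actual register emit is generation specific and
 * elided here): how ctx->last lets the draw path skip redundant emits:
 */
static inline void
emit_restart_index_sketch(struct fd_context *ctx, uint32_t restart_index)
{
	if (ctx->last.dirty || ctx->last.restart_index != restart_index) {
		/* ... emit PC_RESTART_INDEX (or generation equivalent) ... */
		ctx->last.restart_index = restart_index;
	}
}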
static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}
static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
- pipe_mutex_assert_locked(ctx->screen->lock);
+ fd_screen_assert_locked(ctx->screen);
}
static inline void
fd_context_lock(struct fd_context *ctx)
{
- mtx_lock(&ctx->screen->lock);
+ fd_screen_lock(ctx->screen);
}
static inline void
fd_context_unlock(struct fd_context *ctx)
{
- mtx_unlock(&ctx->screen->lock);
+ fd_screen_unlock(ctx->screen);
}
/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
+ ctx->last.dirty = true;
ctx->dirty = ~0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
ctx->dirty_shader[i] = ~0;
static inline void
fd_context_all_clean(struct fd_context *ctx)
{
+ ctx->last.dirty = false;
ctx->dirty = 0;
for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
/* don't mark compute state as clean, since it is not emitted
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
- if (ctx->rasterizer && ctx->rasterizer->scissor)
- return &ctx->scissor;
- return &ctx->disabled_scissor;
+ return ctx->current_scissor;
}
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
struct fd_context *ctx = batch->ctx;
- /* special case: internal blits (like mipmap level generation)
- * go through normal draw path (via util_blitter_blit()).. but
- * we need to ignore the FD_STAGE_DRAW which will be set, so we
- * don't enable queries which should be paused during internal
- * blits:
- */
- if ((batch->stage == FD_STAGE_BLIT) &&
- (stage != FD_STAGE_NULL))
- return;
-
if (ctx->query_set_stage)
ctx->query_set_stage(batch, stage);
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
+void fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len);
+void fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len);
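/* Editorial sketch: emitting a debug marker into the cmdstream (the
 * string5 variant uses the a5xx+ packet format):
 *
 *    fd_emit_string5(batch->draw, "begin frame", 11);
 */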
struct pipe_context * fd_context_init(struct fd_context *ctx,
struct pipe_screen *pscreen, const uint8_t *primtypes,