uint32_t dirty_mask;
};
+struct fd_shaderbuf_stateobj {
+ struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
+ uint32_t enabled_mask;
+ uint32_t dirty_mask;
+};
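/* Illustrative sketch (not part of this patch): how a set_shader_buffers()
 * style bind hook could maintain the new per-stage SSBO state.  The helper
 * name fd_set_shader_buffers() and the exact gallium hook signature are
 * assumptions here; the point is the enabled_mask/dirty_mask bookkeeping
 * and the per-stage FD_DIRTY_SHADER_SSBO flag added below.
 */
static void
fd_set_shader_buffers(struct pipe_context *pctx, enum pipe_shader_type shader,
		unsigned start, unsigned count,
		const struct pipe_shader_buffer *buffers)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
	unsigned mask = 0;

	for (unsigned i = 0; i < count; i++) {
		unsigned n = i + start;
		struct pipe_shader_buffer *buf = &so->sb[n];
		const struct pipe_shader_buffer *src = buffers ? &buffers[i] : NULL;

		if (src && src->buffer) {
			pipe_resource_reference(&buf->buffer, src->buffer);
			buf->buffer_offset = src->buffer_offset;
			buf->buffer_size = src->buffer_size;
			so->enabled_mask |= BIT(n);
		} else {
			pipe_resource_reference(&buf->buffer, NULL);
			buf->buffer_offset = 0;
			buf->buffer_size = 0;
			so->enabled_mask &= ~BIT(n);
		}
		mask |= BIT(n);
	}

	so->dirty_mask |= mask;
	ctx->dirty_shader[shader] |= FD_DIRTY_SHADER_SSBO;
}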
+
struct fd_vertexbuf_stateobj {
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
unsigned count;
FD_DIRTY_VIEWPORT = BIT(8),
FD_DIRTY_VTXSTATE = BIT(9),
FD_DIRTY_VTXBUF = BIT(10),
- FD_DIRTY_INDEXBUF = BIT(11),
+
FD_DIRTY_SCISSOR = BIT(12),
FD_DIRTY_STREAMOUT = BIT(13),
FD_DIRTY_UCP = BIT(14),
FD_DIRTY_SHADER_PROG = BIT(0),
FD_DIRTY_SHADER_CONST = BIT(1),
FD_DIRTY_SHADER_TEX = BIT(2),
+ FD_DIRTY_SHADER_SSBO = BIT(3),
};
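/* Illustrative sketch (not part of this patch): consuming the new per-stage
 * SSBO dirty bit at emit time.  emit_one_ssbo() is a hypothetical per-slot
 * helper; the flag and mask handling is what is being shown.
 */
static void
emit_ssbos(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum pipe_shader_type type)
{
	struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[type];

	if (!(ctx->dirty_shader[type] & FD_DIRTY_SHADER_SSBO))
		return;

	/* only re-emit slots that are both bound and changed: */
	unsigned mask = so->dirty_mask & so->enabled_mask;
	while (mask) {
		unsigned n = u_bit_scan(&mask);
		emit_one_ssbo(ring, n, &so->sb[n]);   /* hypothetical helper */
	}

	so->dirty_mask = 0;
}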
struct fd_context {
/* slab for pipe_transfer allocations: */
struct slab_child_pool transfer_pool;
+ /**
+ * query related state:
+ */
+ /*@{*/
/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
struct slab_mempool sample_pool;
struct slab_mempool sample_period_pool;
/* sample-providers for hw queries: */
- const struct fd_hw_sample_provider *sample_providers[MAX_HW_SAMPLE_PROVIDERS];
+ const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
/* list of active queries: */
- struct list_head active_queries;
+ struct list_head hw_active_queries;
+
+ /* sample-providers for accumulating hw queries: */
+ const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
+
+ /* list of active accumulating queries: */
+ struct list_head acc_active_queries;
+ /*@}*/
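/* Illustrative sketch (not part of this patch): registering an accumulating
 * sample provider into the new acc_sample_providers[] table, mirroring the
 * existing hw provider table.  The helper name is an assumption.
 */
static void
register_acc_provider(struct fd_context *ctx,
		const struct fd_acc_sample_provider *provider)
{
	for (unsigned i = 0; i < MAX_HW_SAMPLE_PROVIDERS; i++) {
		if (!ctx->acc_sample_providers[i]) {
			ctx->acc_sample_providers[i] = provider;
			return;
		}
	}
	/* both tables are sized MAX_HW_SAMPLE_PROVIDERS, so running out here
	 * means the array needs to grow:
	 */
	assert(0);
}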
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
means we'd always have to recalc tiles every batch)
*/
struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe pipe[8];
+ struct fd_vsc_pipe pipe[16];
struct fd_tile tile[512];
/* which state objects need to be re-emit'd: */
/* per shader-stage dirty status: */
enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];
+ void *compute; /* bound compute state object: */
struct pipe_blend_state *blend;
struct pipe_rasterizer_state *rasterizer;
struct pipe_depth_stencil_alpha_state *zsa;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
- struct pipe_index_buffer indexbuf;
+ struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
struct fd_streamout_stateobj streamout;
struct pipe_clip_state ucp;
void (*emit_sysmem_fini)(struct fd_batch *batch);
/* draw: */
- bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
- void (*clear)(struct fd_context *ctx, unsigned buffers,
+ bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset);
+ bool (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
+ /* compute: */
+ void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
+
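/* Illustrative sketch (not part of this patch): a gallium launch_grid()
 * entry point forwarding to the new per-generation hook.  The function name
 * and the dirty handling are assumptions; the point is that compute dispatch
 * goes through ctx->launch_grid rather than the draw_vbo path.
 */
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
{
	struct fd_context *ctx = fd_context(pctx);

	if (!ctx->launch_grid)   /* not all generations support compute */
		return;

	ctx->launch_grid(ctx, info);

	/* (assumption) compute state is consumed here rather than by draws;
	 * cf. the PIPE_SHADER_COMPUTE note in fd_context_all_clean() below:
	 */
	ctx->dirty_shader[PIPE_SHADER_COMPUTE] = 0;
}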
/* constant emit: (note currently not used/needed for a2xx) */
void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
uint32_t regid, uint32_t offset, uint32_t sizedwords,
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
- void (*query_set_stage)(struct fd_batch *batch,
- struct fd_ringbuffer *ring, enum fd_render_stage stage);
+ void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
/*
* Common pre-cooked VBO state (used for a3xx and later):
fd_context_all_clean(struct fd_context *ctx)
{
ctx->dirty = 0;
- for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
+ for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
+ /* don't mark compute state as clean, since it is not emitted
+ * during a normal draw call. For the places that call _all_dirty(),
+ * it is safe to mark compute state dirty as well, but the
+ * inverse is not true.
+ */
+ if (i == PIPE_SHADER_COMPUTE)
+ continue;
ctx->dirty_shader[i] = 0;
+ }
}
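/* For reference (sketch, not part of this diff): the _all_dirty() counterpart
 * that the comment above refers to.  Marking everything dirty, compute
 * included, is always safe, so it needs no PIPE_SHADER_COMPUTE special case:
 */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
	ctx->dirty = ~0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
		ctx->dirty_shader[i] = ~0;
}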
static inline struct pipe_scissor_state *
}
static inline void
-fd_batch_set_stage(struct fd_batch *batch,
- struct fd_ringbuffer *ring, enum fd_render_stage stage)
+fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
struct fd_context *ctx = batch->ctx;
return;
if (ctx->query_set_stage)
- ctx->query_set_stage(batch, ring, stage);
+ ctx->query_set_stage(batch, stage);
batch->stage = stage;
}
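/* Illustrative sketch (not part of this patch): with the ring argument gone,
 * callers only name the stage and the query machinery resolves the ring from
 * the batch itself.  The surrounding function is hypothetical; FD_STAGE_DRAW
 * is the existing render-stage enum value.
 */
static void
draw_example(struct fd_batch *batch)
{
	fd_batch_set_stage(batch, FD_STAGE_DRAW);
	/* ... emit draw commands into batch->draw ... */
}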