-/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
-
/*
* Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
*
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
-#include "util/u_slab.h"
+#include "util/slab.h"
#include "util/u_string.h"
+#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"
struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
unsigned num_samplers;
unsigned valid_samplers;
+ /* MSAA sample count, 2 bits per sampler: */
+ uint32_t samples;
};
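+
+/* A minimal sketch (illustrative, not part of this change) of how the
+ * 2-bits-per-sampler 'samples' field above could be unpacked for
+ * sampler unit i:
+ *
+ *    unsigned nr_samples = (tex->samples >> (i * 2)) & 0x3;
+ */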
struct fd_program_stateobj {
- void *vp, *fp;
-
- /* rest only used by fd2.. split out: */
- uint8_t num_exports;
- /* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
- * for TGSI_SEMANTIC_GENERIC. Special vs exports (position and point-
- * size) are not included in this
- */
- uint8_t export_linkage[63];
+ void *vs, *hs, *ds, *gs, *fs;
};
struct fd_constbuf_stateobj {
struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
uint32_t enabled_mask;
- uint32_t dirty_mask;
+};
+
+struct fd_shaderbuf_stateobj {
+ struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
+ uint32_t enabled_mask;
+ uint32_t writable_mask;
+};
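+
+/* Sketch of the assumed usage of the masks above; u_bit_scan() is the
+ * usual util idiom for walking set bits, and emit_ssbo() here is a
+ * hypothetical consumer:
+ *
+ *    uint32_t mask = so->enabled_mask;
+ *    while (mask) {
+ *        unsigned i = u_bit_scan(&mask);
+ *        bool writable = !!(so->writable_mask & (1 << i));
+ *        emit_ssbo(ring, &so->sb[i], writable);
+ *    }
+ */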
+
+struct fd_shaderimg_stateobj {
+ struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
+ uint32_t enabled_mask;
};
struct fd_vertexbuf_stateobj {
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
unsigned count;
uint32_t enabled_mask;
- uint32_t dirty_mask;
};
struct fd_vertex_stateobj {
struct fd_streamout_stateobj {
struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
+ /* Bitmask of streams that should be reset. */
+ unsigned reset;
+
unsigned num_targets;
/* Track offset from vtxcnt for streamout data. This counter
* is just incremented by # of vertices on each draw until
unsigned offsets[PIPE_MAX_SO_BUFFERS];
};
+#define MAX_GLOBAL_BUFFERS 16
+struct fd_global_bindings_stateobj {
+ struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
+ uint32_t enabled_mask;
+};
+
/* group together the vertex and vertexbuf state.. for ease of passing
* around, and because various internal operations (gmem<->mem, etc)
* need their own vertex state:
struct fd_vertexbuf_stateobj vertexbuf;
};
-/* Bitmask of stages in rendering that a particular query query is
- * active. Queries will be automatically started/stopped (generating
- * additional fd_hw_sample_period's) on entrance/exit from stages that
- * are applicable to the query.
- *
- * NOTE: set the stage to NULL at end of IB to ensure no query is still
- * active. Things aren't going to work out the way you want if a query
- * is active across IB's (or between tile IB and draw IB)
- */
-enum fd_render_stage {
- FD_STAGE_NULL = 0x00,
- FD_STAGE_DRAW = 0x01,
- FD_STAGE_CLEAR = 0x02,
- /* TODO before queries which include MEM2GMEM or GMEM2MEM will
- * work we will need to call fd_hw_query_prepare() from somewhere
- * appropriate so that queries in the tiling IB get backed with
- * memory to write results to.
+/* global 3d pipeline dirty state: */
+enum fd_dirty_3d_state {
+ FD_DIRTY_BLEND = BIT(0),
+ FD_DIRTY_RASTERIZER = BIT(1),
+ FD_DIRTY_ZSA = BIT(2),
+ FD_DIRTY_BLEND_COLOR = BIT(3),
+ FD_DIRTY_STENCIL_REF = BIT(4),
+ FD_DIRTY_SAMPLE_MASK = BIT(5),
+ FD_DIRTY_FRAMEBUFFER = BIT(6),
+ FD_DIRTY_STIPPLE = BIT(7),
+ FD_DIRTY_VIEWPORT = BIT(8),
+ FD_DIRTY_VTXSTATE = BIT(9),
+ FD_DIRTY_VTXBUF = BIT(10),
+ FD_DIRTY_MIN_SAMPLES = BIT(11),
+
+ FD_DIRTY_SCISSOR = BIT(12),
+ FD_DIRTY_STREAMOUT = BIT(13),
+ FD_DIRTY_UCP = BIT(14),
+ FD_DIRTY_BLEND_DUAL = BIT(15),
+
+ /* These are a bit redundant with fd_dirty_shader_state, and possibly
+ * should be removed. (But OTOH kinda convenient in some places)
*/
- FD_STAGE_MEM2GMEM = 0x04,
- FD_STAGE_GMEM2MEM = 0x08,
- /* used for driver internal draws (ie. util_blitter_blit()): */
- FD_STAGE_BLIT = 0x10,
+ FD_DIRTY_PROG = BIT(16),
+ FD_DIRTY_CONST = BIT(17),
+ FD_DIRTY_TEX = BIT(18),
+ FD_DIRTY_IMAGE = BIT(19),
+ FD_DIRTY_SSBO = BIT(20),
+
+ /* only used by a2xx.. possibly can be removed.. */
+ FD_DIRTY_TEXSTATE = BIT(21),
};
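+
+/* Sketch of the assumed flow (emit_viewport() is a hypothetical emit
+ * helper): state-setter entrypoints OR in a bit, and the draw path
+ * tests it before re-emitting registers:
+ *
+ *    ctx->dirty |= FD_DIRTY_VIEWPORT;
+ *    ...
+ *    if (ctx->dirty & FD_DIRTY_VIEWPORT)
+ *        emit_viewport(ring, &ctx->viewport);
+ */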
-#define MAX_HW_SAMPLE_PROVIDERS 4
-struct fd_hw_sample_provider;
-struct fd_hw_sample;
+/* per shader-stage dirty state: */
+enum fd_dirty_shader_state {
+ FD_DIRTY_SHADER_PROG = BIT(0),
+ FD_DIRTY_SHADER_CONST = BIT(1),
+ FD_DIRTY_SHADER_TEX = BIT(2),
+ FD_DIRTY_SHADER_SSBO = BIT(3),
+ FD_DIRTY_SHADER_IMAGE = BIT(4),
+};
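+
+/* Sketch of the assumed relationship to fd_dirty_3d_state: changes are
+ * tracked per-stage and also mirrored into the global bits, e.g. for a
+ * fragment texture update:
+ *
+ *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
+ *    ctx->dirty |= FD_DIRTY_TEX;
+ */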
struct fd_context {
struct pipe_context base;
+ struct list_head node; /* node in screen->context_list */
+
+ /* We currently need to serialize emitting GMEM batches, because of
+ * VSC state access in the context.
+ *
+ * In practice this lock should not be contended, since pipe_context
+ * use should be single threaded. But it is needed to protect the
+ * case, with batch reordering, where a ctxB batch triggers flushing
+ * a ctxA batch.
+ */
+ mtx_t gmem_lock;
+
struct fd_device *dev;
struct fd_screen *screen;
+ struct fd_pipe *pipe;
struct blitter_context *blitter;
+ void *clear_rs_state;
struct primconvert_context *primconvert;
/* slab for pipe_transfer allocations: */
- struct util_slab_mempool transfer_pool;
-
- /* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
- struct util_slab_mempool sample_pool;
- struct util_slab_mempool sample_period_pool;
+ struct slab_child_pool transfer_pool;
- /* next sample offset.. incremented for each sample in the batch/
- * submit, reset to zero on next submit.
+ /**
+ * query related state:
*/
- uint32_t next_sample_offset;
+ /*@{*/
+ /* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
+ struct slab_mempool sample_pool;
+ struct slab_mempool sample_period_pool;
/* sample-providers for hw queries: */
- const struct fd_hw_sample_provider *sample_providers[MAX_HW_SAMPLE_PROVIDERS];
+ const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
- /* cached samples (in case multiple queries need to reference
- * the same sample snapshot)
- */
- struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];
-
- /* which sample providers were active in the current batch: */
- uint32_t active_providers;
+ /* list of active queries: */
+ struct list_head hw_active_queries;
- /* tracking for current stage, to know when to start/stop
- * any active queries:
- */
- enum fd_render_stage stage;
+ /* sample-providers for accumulating hw queries: */
+ const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
- /* list of active queries: */
- struct list_head active_queries;
+ /* list of active accumulating queries: */
+ struct list_head acc_active_queries;
+ /*@}*/
- /* list of queries that are not active, but were active in the
- * current submit:
+ /* Whether we need to walk the acc_active_queries on the next
+ * fd_set_stage(), to update active queries (even if the stage
+ * doesn't change).
*/
- struct list_head current_queries;
-
- /* current query result bo and tile stride: */
- struct fd_bo *query_bo;
- uint32_t query_tile_stride;
+ bool update_active_queries;
- /* list of resources used by currently-unsubmitted renders */
- struct list_head used_resources;
+ /* Current state of pctx->set_active_query_state() (i.e. "should drawing
+ * be counted against non-perfcounter queries")
+ */
+ bool active_queries;
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
struct fd_program_stateobj blit_z, blit_zs;
- /* do we need to mem2gmem before rendering. We don't, if for example,
- * there was a glClear() that invalidated the entire previous buffer
- * contents. Keep track of which buffer(s) are cleared, or needs
- * restore. Masks of PIPE_CLEAR_*
- *
- * The 'cleared' bits will be set for buffers which are *entirely*
- * cleared, and 'partial_cleared' bits will be set if you must
- * check cleared_scissor.
- */
- enum {
- /* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
- FD_BUFFER_COLOR = PIPE_CLEAR_COLOR,
- FD_BUFFER_DEPTH = PIPE_CLEAR_DEPTH,
- FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
- FD_BUFFER_ALL = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
- } cleared, partial_cleared, restore, resolve;
-
- bool needs_flush;
-
- /* To decide whether to render to system memory, keep track of the
- * number of draws, and whether any of them require multisample,
- * depth_test (or depth write), stencil_test, blending, and
- * color_logic_Op (since those functions are disabled when by-
- * passing GMEM.
- */
- enum {
- FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
- FD_GMEM_DEPTH_ENABLED = 0x02,
- FD_GMEM_STENCIL_ENABLED = 0x04,
-
- FD_GMEM_MSAA_ENABLED = 0x08,
- FD_GMEM_BLEND_ENABLED = 0x10,
- FD_GMEM_LOGICOP_ENABLED = 0x20,
- } gmem_reason;
- unsigned num_draws; /* number of draws in current batch */
-
/* Stats/counters:
*/
struct {
uint64_t prims_emitted;
uint64_t prims_generated;
uint64_t draw_calls;
- uint64_t batch_total, batch_sysmem, batch_gmem, batch_restore;
+ uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
+ uint64_t staging_uploads, shadow_uploads;
+ uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
} stats;
- /* we can't really sanely deal with wraparound point in ringbuffer
- * and because of the way tiling works we can't really flush at
- * arbitrary points (without a big performance hit). When we get
- * too close to the end of the current ringbuffer, cycle to the next
- * one (and wait for pending rendering from next rb to complete).
- * We want the # of ringbuffers to be high enough that we don't
- * normally have to wait before resetting to the start of the next
- * rb.
- */
- struct fd_ringbuffer *rings[8];
- unsigned rings_idx;
-
- /* NOTE: currently using a single ringbuffer for both draw and
- * tiling commands, we need to make sure we need to leave enough
- * room at the end to append the tiling commands when we flush.
- * 0x7000 dwords should be a couple times more than we ever need
- * so should be a nice conservative threshold.
+ /* Current batch.. the rule here is that you can deref ctx->batch
+ * in codepaths from pipe_context entrypoints. But not in code-
+ * paths from fd_batch_flush() (basically, the stuff that gets
+ * called from GMEM code), since in those code-paths the batch
+ * you care about is not necessarily the same as ctx->batch.
*/
-#define FD_TILING_COMMANDS_DWORDS 0x7000
+ struct fd_batch *batch;
- /* normal draw/clear cmds: */
- struct fd_ringbuffer *ring;
- struct fd_ringmarker *draw_start, *draw_end;
-
- /* binning pass draw/clear cmds: */
- struct fd_ringbuffer *binning_ring;
- struct fd_ringmarker *binning_start, *binning_end;
+ /* NULL if there has been rendering since the last flush. Otherwise
+ * keeps a reference to the last fence so we can re-use it rather
+ * than having to flush a no-op batch.
+ */
+ struct pipe_fence_handle *last_fence;
- /* Keep track if WAIT_FOR_IDLE is needed for registers we need
- * to update via RMW:
+ /* track last known reset status globally and per-context to
+ * determine if more resets occurred since then. If global reset
+ * count increases, it means some other context crashed. If
+ * per-context reset count increases, it means we crashed the
+ * gpu.
*/
- bool needs_wfi;
+ uint32_t context_reset_count, global_reset_count;
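+
+ /* Sketch of the assumed get_device_reset_status() logic built on
+ * these counters (illustrative; the exact kernel queries vary):
+ *
+ *    if (new_context_count > ctx->context_reset_count)
+ *        return PIPE_GUILTY_CONTEXT_RESET;    (we crashed the gpu)
+ *    if (new_global_count > ctx->global_reset_count)
+ *        return PIPE_INNOCENT_CONTEXT_RESET;  (some other context did)
+ */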
- /* Do we need to re-emit RB_FRAME_BUFFER_DIMENSION? At least on a3xx
- * it is not a banked context register, so it needs a WFI to update.
- * Keep track if it has actually changed, to avoid unneeded WFI.
- * */
- bool needs_rb_fbd;
+ /* Are we in process of shadowing a resource? Used to detect recursion
+ * in transfer_map, and skip unneeded synchronization.
+ */
+ bool in_shadow : 1;
- /* Keep track of DRAW initiators that need to be patched up depending
- * on whether we using binning or not:
+ /* I.e. in a blit situation where we no longer care about the previous
+ * framebuffer contents. The main point is to eliminate blits from
+ * fd_try_shadow_resource(), for example in the case of texture
+ * upload + gen-mipmaps.
*/
- struct util_dynarray draw_patches;
+ bool in_discard_blit : 1;
struct pipe_scissor_state scissor;
*/
struct pipe_scissor_state disabled_scissor;
- /* Track the maximal bounds of the scissor of all the draws within a
- * batch. Used at the tile rendering step (fd_gmem_render_tiles(),
- * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
- */
- struct pipe_scissor_state max_scissor;
-
- /* Track the cleared scissor for color/depth/stencil, so we know
- * which, if any, tiles need to be restored (mem2gmem). Only valid
- * if the corresponding bit in ctx->cleared is set.
- */
- struct {
- struct pipe_scissor_state color, depth, stencil;
- } cleared_scissor;
-
- /* Current gmem/tiling configuration.. gets updated on render_tiles()
- * if out of date with current maximal-scissor/cpp:
- */
- struct fd_gmem_stateobj gmem;
- struct fd_vsc_pipe pipe[8];
- struct fd_tile tile[512];
+ /* Per vsc pipe bo's (a2xx-a5xx): */
+ struct fd_bo *vsc_pipe_bo[32];
/* which state objects need to be re-emit'd: */
- enum {
- FD_DIRTY_BLEND = (1 << 0),
- FD_DIRTY_RASTERIZER = (1 << 1),
- FD_DIRTY_ZSA = (1 << 2),
- FD_DIRTY_FRAGTEX = (1 << 3),
- FD_DIRTY_VERTTEX = (1 << 4),
- FD_DIRTY_TEXSTATE = (1 << 5),
-
- FD_SHADER_DIRTY_VP = (1 << 6),
- FD_SHADER_DIRTY_FP = (1 << 7),
- /* skip geom/tcs/tes/compute */
- FD_DIRTY_PROG = FD_SHADER_DIRTY_FP | FD_SHADER_DIRTY_VP,
-
- FD_DIRTY_BLEND_COLOR = (1 << 12),
- FD_DIRTY_STENCIL_REF = (1 << 13),
- FD_DIRTY_SAMPLE_MASK = (1 << 14),
- FD_DIRTY_FRAMEBUFFER = (1 << 15),
- FD_DIRTY_STIPPLE = (1 << 16),
- FD_DIRTY_VIEWPORT = (1 << 17),
- FD_DIRTY_CONSTBUF = (1 << 18),
- FD_DIRTY_VTXSTATE = (1 << 19),
- FD_DIRTY_VTXBUF = (1 << 20),
- FD_DIRTY_INDEXBUF = (1 << 21),
- FD_DIRTY_SCISSOR = (1 << 22),
- FD_DIRTY_STREAMOUT = (1 << 23),
- FD_DIRTY_UCP = (1 << 24),
- FD_DIRTY_BLEND_DUAL = (1 << 25),
- } dirty;
+ enum fd_dirty_3d_state dirty;
+ /* per shader-stage dirty status: */
+ enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];
+
+ void *compute;
struct pipe_blend_state *blend;
struct pipe_rasterizer_state *rasterizer;
struct pipe_depth_stencil_alpha_state *zsa;
- struct fd_texture_stateobj verttex, fragtex;
+ struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];
struct fd_program_stateobj prog;
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
+ unsigned min_samples;
+ /* local context fb state, for when ctx->batch is null: */
struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
+ struct pipe_scissor_state viewport_scissor;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
- struct pipe_index_buffer indexbuf;
+ struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
+ struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
struct fd_streamout_stateobj streamout;
+ struct fd_global_bindings_stateobj global_bindings;
struct pipe_clip_state ucp;
struct pipe_query *cond_query;
struct pipe_debug_callback debug;
/* GMEM/tile handling fxns: */
- void (*emit_tile_init)(struct fd_context *ctx);
- void (*emit_tile_prep)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_context *ctx, struct fd_tile *tile);
+ void (*emit_tile_init)(struct fd_batch *batch);
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
+ void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
/* optional, for GMEM bypass: */
- void (*emit_sysmem_prep)(struct fd_context *ctx);
+ void (*emit_sysmem_prep)(struct fd_batch *batch);
+ void (*emit_sysmem_fini)(struct fd_batch *batch);
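+
+ /* Sketch of the assumed per-batch sequence driven by the gmem code
+ * (the optional hooks are skipped when NULL):
+ *
+ *    emit_tile_init(batch);
+ *    for each tile:
+ *        emit_tile_prep(batch, tile);
+ *        emit_tile_mem2gmem(batch, tile);    (restore, if needed)
+ *        emit_tile_renderprep(batch, tile);
+ *        (replay draw cmds)
+ *        emit_tile_gmem2mem(batch, tile);    (resolve to memory)
+ *    emit_tile_fini(batch);
+ */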
/* draw: */
- bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
- void (*clear)(struct fd_context *ctx, unsigned buffers,
+ bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
+ unsigned index_offset);
+ bool (*clear)(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil);
- /* constant emit: (note currently not used/needed for a2xx) */
- void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
- uint32_t regid, uint32_t offset, uint32_t sizedwords,
- const uint32_t *dwords, struct pipe_resource *prsc);
- /* emit bo addresses as constant: */
- void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
- uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
-
- /* indirect-branch emit: */
- void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringmarker *start,
- struct fd_ringmarker *end);
+ /* compute: */
+ void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
+
+ /* query: */
+ struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
+ void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
+ void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
+ struct fd_ringbuffer *ring);
+ void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
+
+ /* blitter: */
+ bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
+
+ /* handling for barriers: */
+ void (*framebuffer_barrier)(struct fd_context *ctx);
+
+ /* logger: */
+ void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
+ uint64_t (*ts_to_ns)(uint64_t ts);
+
+ struct list_head log_chunks; /* list of flushed log chunks in fifo order */
+ unsigned frame_nr; /* frame counter (for fd_log) */
+ FILE *log_out;
+
+ /*
+ * Common pre-cooked VBO state (used for a3xx and later):
+ */
+
+ /* for clear/gmem->mem vertices, and mem->gmem */
+ struct pipe_resource *solid_vbuf;
+
+ /* for mem->gmem tex coords: */
+ struct pipe_resource *blit_texcoord_vbuf;
+
+ /* vertex state for solid_vbuf (buffer / stride / format):
+ * - solid_vbuf / 12 / R32G32B32_FLOAT
+ */
+ struct fd_vertex_state solid_vbuf_state;
+
+ /* vertex state for blit_prog (buffer / stride / format):
+ * - blit_texcoord_vbuf / 8 / R32G32_FLOAT
+ * - solid_vbuf / 12 / R32G32B32_FLOAT
+ */
+ struct fd_vertex_state blit_vbuf_state;
+
+ /*
+ * Info about the state of the previous draw, for state that comes
+ * from pipe_draw_info (i.e. not part of a CSO). This allows us to
+ * skip some register emits when the state doesn't change from
+ * draw to draw.
+ */
+ struct {
+ bool dirty; /* last draw state unknown */
+ bool primitive_restart;
+ uint32_t index_start;
+ uint32_t instance_start;
+ uint32_t restart_index;
+ } last;
};
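+
+/* Sketch of the intended use of ctx->last (hypothetical emit-side
+ * check; the actual register writes are per-generation):
+ *
+ *    if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
+ *        emit_restart_index(ring, restart_index);
+ *        ctx->last.restart_index = restart_index;
+ *    }
+ */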
static inline struct fd_context *
return (struct fd_context *)pctx;
}
+static inline void
+fd_context_assert_locked(struct fd_context *ctx)
+{
+ pipe_mutex_assert_locked(ctx->screen->lock);
+}
+
+static inline void
+fd_context_lock(struct fd_context *ctx)
+{
+ mtx_lock(&ctx->screen->lock);
+}
+
+static inline void
+fd_context_unlock(struct fd_context *ctx)
+{
+ mtx_unlock(&ctx->screen->lock);
+}
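+
+/* Assumed usage pattern (illustrative): short critical sections around
+ * screen-wide shared state:
+ *
+ *    fd_context_lock(ctx);
+ *    (touch screen-wide batch/resource tracking)
+ *    fd_context_unlock(ctx);
+ */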
+
+/* mark all state dirty: */
+static inline void
+fd_context_all_dirty(struct fd_context *ctx)
+{
+ ctx->last.dirty = true;
+ ctx->dirty = ~0;
+ for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
+ ctx->dirty_shader[i] = ~0;
+}
+
+static inline void
+fd_context_all_clean(struct fd_context *ctx)
+{
+ ctx->dirty = 0;
+ for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
+ /* don't mark compute state as clean, since it is not emitted
+ * during a normal draw call. For the places that call
+ * _all_dirty(), it is safe to mark compute state dirty as well,
+ * but the inverse is not true.
+ */
+ if (i == PIPE_SHADER_COMPUTE)
+ continue;
+ ctx->dirty_shader[i] = 0;
+ }
+}
+
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
return (1 << prim) & ctx->primtype_mask;
}
-static inline void
-fd_reset_wfi(struct fd_context *ctx)
-{
- ctx->needs_wfi = true;
-}
-
-/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
- * been one since last draw:
- */
-static inline void
-fd_wfi(struct fd_context *ctx, struct fd_ringbuffer *ring)
+static inline struct fd_batch *
+fd_context_batch(struct fd_context *ctx)
{
- if (ctx->needs_wfi) {
- OUT_WFI(ring);
- ctx->needs_wfi = false;
+ if (unlikely(!ctx->batch)) {
+ struct fd_batch *batch =
+ fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
+ util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
+ ctx->batch = batch;
+ fd_context_all_dirty(ctx);
}
+ return ctx->batch;
}
-/* emit a CP_EVENT_WRITE:
- */
static inline void
-fd_event_write(struct fd_context *ctx, struct fd_ringbuffer *ring,
- enum vgt_event_type evt)
+fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
- OUT_PKT3(ring, CP_EVENT_WRITE, 1);
- OUT_RING(ring, evt);
- fd_reset_wfi(ctx);
+ struct fd_context *ctx = batch->ctx;
+
+ if (ctx->query_set_stage)
+ ctx->query_set_stage(batch, stage);
+
+ batch->stage = stage;
}
+void fd_context_setup_common_vbos(struct fd_context *ctx);
+void fd_context_cleanup_common_vbos(struct fd_context *ctx);
+
struct pipe_context * fd_context_init(struct fd_context *ctx,
struct pipe_screen *pscreen, const uint8_t *primtypes,
- void *priv);
-
-void fd_context_render(struct pipe_context *pctx);
+ void *priv, unsigned flags);
void fd_context_destroy(struct pipe_context *pctx);