/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
   struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
   unsigned num_textures;
   unsigned valid_textures;
   struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
   unsigned num_samplers;
   unsigned valid_samplers;
   /* number of samples per sampler, 2 bits per sampler: */
   uint32_t samples;
};

struct fd_program_stateobj {
   void *vs, *hs, *ds, *gs, *fs;
};

struct fd_constbuf_stateobj {
   struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
   uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
   struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
   uint32_t enabled_mask;
   uint32_t writable_mask;
};

struct fd_shaderimg_stateobj {
   struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
   uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
   unsigned count;
   uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
   struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
   unsigned num_elements;
};

struct fd_streamout_stateobj {
   struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
   /* Bitmask of streams that should be reset. */
   unsigned reset;

   unsigned num_targets;
   /* Track offset from vtxcnt for streamout data.  This counter
    * is just incremented by # of vertices on each draw until
    * reset or new streamout buffer bound.
    *
    * When we eventually have GS, the CPU won't actually know the
    * number of vertices per draw, so I think we'll have to do
    * something more clever.
    */
   unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

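/* A hedged usage sketch (not part of this header): the draw path is expected
 * to advance the streamout offsets by the vertex count of each draw, roughly
 * as below.  "so" and "info" are hypothetical locals at the call site:
 *
 *    for (unsigned i = 0; i < so->num_targets; i++)
 *       if (so->targets[i])
 *          so->offsets[i] += info->count;
 */
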
#define MAX_GLOBAL_BUFFERS 16
struct fd_global_bindings_stateobj {
   struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
   uint32_t enabled_mask;
};

/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
   struct fd_vertex_stateobj *vtx;
   struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
   FD_DIRTY_BLEND       = BIT(0),
   FD_DIRTY_RASTERIZER  = BIT(1),
   FD_DIRTY_ZSA         = BIT(2),
   FD_DIRTY_BLEND_COLOR = BIT(3),
   FD_DIRTY_STENCIL_REF = BIT(4),
   FD_DIRTY_SAMPLE_MASK = BIT(5),
   FD_DIRTY_FRAMEBUFFER = BIT(6),
   FD_DIRTY_STIPPLE     = BIT(7),
   FD_DIRTY_VIEWPORT    = BIT(8),
   FD_DIRTY_VTXSTATE    = BIT(9),
   FD_DIRTY_VTXBUF      = BIT(10),
   FD_DIRTY_MIN_SAMPLES = BIT(11),
   FD_DIRTY_SCISSOR     = BIT(12),
   FD_DIRTY_STREAMOUT   = BIT(13),
   FD_DIRTY_UCP         = BIT(14),
   FD_DIRTY_BLEND_DUAL  = BIT(15),

   /* These are a bit redundant with fd_dirty_shader_state, and possibly
    * should be removed.  (But OTOH kinda convenient in some places)
    */
   FD_DIRTY_PROG        = BIT(16),
   FD_DIRTY_CONST       = BIT(17),
   FD_DIRTY_TEX         = BIT(18),
   FD_DIRTY_IMAGE       = BIT(19),
   FD_DIRTY_SSBO        = BIT(20),

   /* only used by a2xx.. possibly can be removed.. */
   FD_DIRTY_TEXSTATE    = BIT(21),

   /* fine grained state changes, for cases where state is not orthogonal
    * from hw perspective:
    */
   FD_DIRTY_RASTERIZER_DISCARD = BIT(24),
};

/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
   FD_DIRTY_SHADER_PROG  = BIT(0),
   FD_DIRTY_SHADER_CONST = BIT(1),
   FD_DIRTY_SHADER_TEX   = BIT(2),
   FD_DIRTY_SHADER_SSBO  = BIT(3),
   FD_DIRTY_SHADER_IMAGE = BIT(4),
};

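/* A hedged usage sketch (not part of this header): CSO bind entrypoints are
 * expected to set the corresponding coarse and per-stage dirty bits, and the
 * emit path tests them before re-emitting state.  The emit helper name below
 * is hypothetical:
 *
 *    ctx->dirty |= FD_DIRTY_BLEND;
 *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
 *    ...
 *    if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & FD_DIRTY_SHADER_TEX)
 *       emit_textures(ctx);   // hypothetical emit helper
 */
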
struct fd_context {
   struct pipe_context base;

   struct list_head node;   /* node in screen->context_list */

   /* We currently need to serialize emitting GMEM batches, because of
    * VSC state access in the context.
    *
    * In practice this lock should not be contended, since pipe_context
    * use should be single threaded.  But it is needed to protect the
    * case, with batch reordering, where a ctxB batch triggers flushing
    * a ctxA batch.
    */
   mtx_t gmem_lock;

   struct fd_device *dev;
   struct fd_screen *screen;
   struct fd_pipe *pipe;

   struct blitter_context *blitter;
   void *clear_rs_state;
   struct primconvert_context *primconvert;

   /* slab for pipe_transfer allocations: */
   struct slab_child_pool transfer_pool;

   /*
    * query related state:
    */
   /* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
   struct slab_mempool sample_pool;
   struct slab_mempool sample_period_pool;

   /* sample-providers for hw queries: */
   const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

   /* list of active queries: */
   struct list_head hw_active_queries;

   /* sample-providers for accumulating hw queries: */
   const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

   /* list of active accumulating queries: */
   struct list_head acc_active_queries;

   /* Whether we need to walk the acc_active_queries next fd_set_stage() to
    * update active queries (even if stage doesn't change).
    */
   bool update_active_queries;

   /* Current state of pctx->set_active_query_state() (i.e. "should drawing
    * be counted against non-perfcounter queries"):
    */
   bool active_queries;

   /* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
    * DI_PT_x value to use for draw initiator.  There are some
    * slight differences between generations:
    */
   const uint8_t *primtypes;
   uint32_t primtype_mask;

   /* shaders used by clear, and gmem->mem blits: */
   struct fd_program_stateobj solid_prog; // TODO move to screen?

   /* shaders used by mem->gmem blits: */
   struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
   struct fd_program_stateobj blit_z, blit_zs;

   /* Stats/counters:
    */
   struct {
      uint64_t prims_emitted;
      uint64_t prims_generated;
      uint64_t draw_calls;
      uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
      uint64_t staging_uploads, shadow_uploads;
      uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
   } stats;

   /* Current batch.. the rule here is that you can deref ctx->batch
    * in codepaths from pipe_context entrypoints.  But not in code-
    * paths from fd_batch_flush() (basically, the stuff that gets
    * called from GMEM code), since in those code-paths the batch
    * you care about is not necessarily the same as ctx->batch.
    */
   struct fd_batch *batch;

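   /* A hedged sketch of the rule above (not part of the struct): code called
    * from a pipe_context entrypoint may use ctx->batch, while GMEM-side code
    * must use the batch it was handed:
    *
    *    // ok from a pipe_context entrypoint:
    *    fd_batch_set_stage(ctx->batch, FD_STAGE_DRAW);
    *
    *    // in GMEM code, use the batch argument, not ctx->batch:
    *    void emit_gmem(struct fd_batch *batch) { ... }   // hypothetical
    */
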
   /* NULL if there has been rendering since last flush.  Otherwise
    * keeps a reference to the last fence so we can re-use it rather
    * than having to flush no-op batch.
    */
   struct pipe_fence_handle *last_fence;

   /* Fence fd we are told to wait on via ->fence_server_sync() (or -1
    * if none).  The in-fence is transferred over to the batch on the
    * next draw/blit/grid.
    *
    * The reason for this extra complexity is that apps will typically
    * do eglWaitSyncKHR()/etc at the beginning of the frame, before the
    * first draw.  But mesa/st doesn't flush down framebuffer state
    * change until we hit a draw, so at ->fence_server_sync() time, we
    * don't yet have the correct batch.  If we created a batch at that
    * point, it would be the wrong one, and we'd have to flush it pre-
    * maturely, causing us to stall early in the frame where we could
    * be building up cmdstream.
    */
   int in_fence_fd;

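   /* A hedged sketch (not part of the struct): given the above,
    * ->fence_server_sync() would just stash/merge the fd here instead of
    * flushing, and fd_context_switch_to() (below) hands it to the next
    * batch.  "fence_fd" is a hypothetical local:
    *
    *    sync_accumulate("freedreno", &ctx->in_fence_fd, fence_fd);
    */
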
   /* track last known reset status globally and per-context to
    * determine if more resets occurred since then.  If global reset
    * count increases, it means some other context crashed.  If
    * per-context reset count increases, it means we crashed the
    * gpu.
    */
   uint32_t context_reset_count, global_reset_count;

   /* Are we in process of shadowing a resource? Used to detect recursion
    * in transfer_map, and skip unneeded synchronization.
    */
   bool in_shadow : 1;

   /* Ie. in blit situation where we no longer care about previous framebuffer
    * contents.  Main point is to eliminate blits from fd_try_shadow_resource().
    * For example, in case of texture upload + gen-mipmaps.
    */
   bool in_discard_blit : 1;

   /* points to either scissor or disabled_scissor depending on rast state: */
   struct pipe_scissor_state *current_scissor;

   struct pipe_scissor_state scissor;

   /* we don't have a disable/enable bit for scissor, so instead we keep
    * a disabled-scissor state which matches the entire bound framebuffer
    * and use that when scissor is not enabled.
    */
   struct pipe_scissor_state disabled_scissor;

   /* Per vsc pipe bo's (a2xx-a5xx): */
   struct fd_bo *vsc_pipe_bo[32];

   /* which state objects need to be re-emit'd: */
   enum fd_dirty_3d_state dirty;

   /* per shader-stage dirty status: */
   enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

   void *compute;
   struct pipe_blend_state *blend;
   struct pipe_rasterizer_state *rasterizer;
   struct pipe_depth_stencil_alpha_state *zsa;

   struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

   struct fd_program_stateobj prog;

   struct fd_vertex_state vtx;

   struct pipe_blend_color blend_color;
   struct pipe_stencil_ref stencil_ref;
   unsigned sample_mask;
   unsigned min_samples;
   /* local context fb state, for when ctx->batch is null: */
   struct pipe_framebuffer_state framebuffer;
   struct pipe_poly_stipple stipple;
   struct pipe_viewport_state viewport;
   struct pipe_scissor_state viewport_scissor;
   struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
   struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
   struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
   struct fd_streamout_stateobj streamout;
   struct fd_global_bindings_stateobj global_bindings;
   struct pipe_clip_state ucp;

   struct pipe_query *cond_query;
   bool cond_cond; /* inverted rendering condition */
   uint cond_mode;

   struct pipe_debug_callback debug;

   /* Called on rebind_resource() for any per-gen cleanup required: */
   void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc);

   /* GMEM/tile handling fxns: */
   void (*emit_tile_init)(struct fd_batch *batch);
   void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_fini)(struct fd_batch *batch);   /* optional */

   /* optional, for GMEM bypass: */
   void (*emit_sysmem_prep)(struct fd_batch *batch);
   void (*emit_sysmem_fini)(struct fd_batch *batch);

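   /* A hedged wiring sketch (not part of the struct): each generation's init
    * code is expected to plug its own implementations into these hooks, e.g.
    * (the fd4_* names here are hypothetical):
    *
    *    ctx->emit_tile_init     = fd4_emit_tile_init;
    *    ctx->emit_tile_prep     = fd4_emit_tile_prep;
    *    ctx->emit_tile_gmem2mem = fd4_emit_tile_gmem2mem;
    */
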
   /* draw: */
   bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
         unsigned index_offset);
   bool (*clear)(struct fd_context *ctx, unsigned buffers,
         const union pipe_color_union *color, double depth, unsigned stencil);

   /* compute: */
   void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

   /* query: */
   struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
   void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
   void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
         struct fd_ringbuffer *ring);
   void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

   /* blitter: */
   bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
   void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc);

   /* handling for barriers: */
   void (*framebuffer_barrier)(struct fd_context *ctx);

   /* logger: */
   void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
   uint64_t (*ts_to_ns)(uint64_t ts);

   struct list_head log_chunks;  /* list of flushed log chunks in fifo order */
   unsigned frame_nr;            /* frame counter (for fd_log) */

   /*
    * Common pre-cooked VBO state (used for a3xx and later):
    */

   /* for clear/gmem->mem vertices, and mem->gmem */
   struct pipe_resource *solid_vbuf;

   /* for mem->gmem tex coords: */
   struct pipe_resource *blit_texcoord_vbuf;

   /* vertex state for solid_vbuf:
    *    - solid_vbuf / 12 / R32G32B32_FLOAT
    */
   struct fd_vertex_state solid_vbuf_state;

   /* vertex state for blit_prog:
    *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
    *    - solid_vbuf / 12 / R32G32B32_FLOAT
    */
   struct fd_vertex_state blit_vbuf_state;

   /*
    * Info about state of previous draw, for state that comes from
    * pipe_draw_info (ie. not part of a CSO).  This allows us to
    * skip some register emit when the state doesn't change from
    * draw-to-draw:
    */
   struct {
      bool dirty;               /* last draw state unknown */
      bool primitive_restart;
      uint32_t index_start;
      uint32_t instance_start;
      uint32_t restart_index;
      uint32_t streamout_mask;
   } last;
};

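/* A hedged usage sketch (not in this header): the emit path can compare
 * against ctx->last to skip redundant register writes, roughly like the
 * following hypothetical snippet:
 *
 *    if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
 *       // ... emit restart-index register ...
 *       ctx->last.restart_index = restart_index;
 *    }
 */
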
static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
   return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
   fd_screen_assert_locked(ctx->screen);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
   fd_screen_lock(ctx->screen);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
   fd_screen_unlock(ctx->screen);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
   ctx->last.dirty = true;
   ctx->dirty = ~0;
   for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
      ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
   ctx->last.dirty = false;
   ctx->dirty = 0;
   for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* don't mark compute state as clean, since it is not emitted
       * during normal draw call.  The places that call _all_dirty(),
       * it is safe to mark compute state dirty as well, but the
       * inverse is not true.
       */
      if (i == PIPE_SHADER_COMPUTE)
         continue;
      ctx->dirty_shader[i] = 0;
   }
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
   return ctx->current_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
   return (1 << prim) & ctx->primtype_mask;
}

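/* A hedged usage sketch (not in this header): the draw path would typically
 * use this check to fall back to u_primconvert for primitive types the hw
 * does not support directly, roughly (hypothetical call site):
 *
 *    if (!fd_supported_prim(ctx, info->mode)) {
 *       util_primconvert_draw_vbo(ctx->primconvert, info);
 *       return;
 *    }
 */
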
/**
 * If we have a pending fence_server_sync() (GPU side sync), flush now.
 * The alternative to try to track this with batch dependencies gets
 * hairy quickly.
 *
 * Call this before switching to a different batch, to handle this case.
 */
static inline void
fd_context_switch_from(struct fd_context *ctx)
{
   if (ctx->batch && (ctx->batch->in_fence_fd != -1))
      fd_batch_flush(ctx->batch);
}

/**
 * If there is a pending fence-fd that we need to sync on, this will
 * transfer the reference to the next batch we are going to render
 * to.
 */
static inline void
fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch)
{
   if (ctx->in_fence_fd != -1) {
      sync_accumulate("freedreno", &batch->in_fence_fd, ctx->in_fence_fd);
      ctx->in_fence_fd = -1;
   }
}

static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
   if (unlikely(!ctx->batch)) {
      struct fd_batch *batch =
         fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
      util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
      ctx->batch = batch;
      fd_context_all_dirty(ctx);
   }
   fd_context_switch_to(ctx, ctx->batch);
   return ctx->batch;
}

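/* A hedged usage sketch (not in this header): pipe_context entrypoints that
 * need a batch are expected to go through this accessor rather than reading
 * ctx->batch directly, e.g. (hypothetical entrypoint):
 *
 *    static void fd_some_entrypoint(struct pipe_context *pctx)
 *    {
 *       struct fd_context *ctx = fd_context(pctx);
 *       struct fd_batch *batch = fd_context_batch(ctx);
 *       ...
 *    }
 */
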
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
   struct fd_context *ctx = batch->ctx;

   if (ctx->query_set_stage)
      ctx->query_set_stage(batch, stage);

   batch->stage = stage;
}

void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
void fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len);
void fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len);

struct pipe_context * fd_context_init(struct fd_context *ctx,
      struct pipe_screen *pscreen, const uint8_t *primtypes,
      void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */