/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
   struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
   unsigned num_textures;
   unsigned valid_textures;
   struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
   unsigned num_samplers;
   unsigned valid_samplers;
   /* number of samples per sampler, 2 bits per sampler: */
   uint32_t samples;
};

struct fd_program_stateobj {
   void *vs, *hs, *ds, *gs, *fs;
};

struct fd_constbuf_stateobj {
   struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
   uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
   struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
   uint32_t enabled_mask;
   uint32_t writable_mask;
};

struct fd_shaderimg_stateobj {
   struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
   uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
   unsigned count;
   uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
   struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
   unsigned num_elements;
};

struct fd_streamout_stateobj {
   struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
   /* Bitmask of streams that should be reset. */
   unsigned reset;

   unsigned num_targets;
   /* Track offset from vtxcnt for streamout data.  This counter
    * is just incremented by # of vertices on each draw until
    * reset, or until a new streamout buffer is bound.
    *
    * When we eventually have GS, the CPU won't actually know the
    * number of vertices per draw, so I think we'll have to do
    * something more clever.
    */
   unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

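/* Illustrative sketch (not part of this header): in the pre-GS world the
 * draw path can account for streamout entirely on the CPU, by bumping the
 * per-target offsets by each draw's vertex count, roughly:
 *
 *    for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
 *       ctx->streamout.offsets[i] += info->count;
 */
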
#define MAX_GLOBAL_BUFFERS 16
struct fd_global_bindings_stateobj {
   struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
   uint32_t enabled_mask;
};

/* Group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
   struct fd_vertex_stateobj *vtx;
   struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
   FD_DIRTY_BLEND        = BIT(0),
   FD_DIRTY_RASTERIZER   = BIT(1),
   FD_DIRTY_ZSA          = BIT(2),
   FD_DIRTY_BLEND_COLOR  = BIT(3),
   FD_DIRTY_STENCIL_REF  = BIT(4),
   FD_DIRTY_SAMPLE_MASK  = BIT(5),
   FD_DIRTY_FRAMEBUFFER  = BIT(6),
   FD_DIRTY_STIPPLE      = BIT(7),
   FD_DIRTY_VIEWPORT     = BIT(8),
   FD_DIRTY_VTXSTATE     = BIT(9),
   FD_DIRTY_VTXBUF       = BIT(10),
   FD_DIRTY_MIN_SAMPLES  = BIT(11),
   FD_DIRTY_SCISSOR      = BIT(12),
   FD_DIRTY_STREAMOUT    = BIT(13),
   FD_DIRTY_UCP          = BIT(14),
   FD_DIRTY_BLEND_DUAL   = BIT(15),

   /* These are a bit redundant with fd_dirty_shader_state, and possibly
    * should be removed.  (But OTOH kinda convenient in some places)
    */
   FD_DIRTY_PROG         = BIT(16),
   FD_DIRTY_CONST        = BIT(17),
   FD_DIRTY_TEX          = BIT(18),
   FD_DIRTY_IMAGE        = BIT(19),
   FD_DIRTY_SSBO         = BIT(20),

   /* only used by a2xx.. possibly can be removed.. */
   FD_DIRTY_TEXSTATE     = BIT(21),

   /* fine-grained state changes, for cases where state is not orthogonal
    * from the hw perspective:
    */
   FD_DIRTY_RASTERIZER_DISCARD = BIT(24),
};

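/* Illustrative sketch (not part of this header): a typical pipe_context
 * set_*() entrypoint stores the new state on the context and flags the
 * matching dirty bit, so the next draw re-emits it:
 *
 *    static void
 *    fd_set_blend_color(struct pipe_context *pctx,
 *                       const struct pipe_blend_color *blend_color)
 *    {
 *       struct fd_context *ctx = fd_context(pctx);
 *       ctx->blend_color = *blend_color;
 *       ctx->dirty |= FD_DIRTY_BLEND_COLOR;
 *    }
 */
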
/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
   FD_DIRTY_SHADER_PROG  = BIT(0),
   FD_DIRTY_SHADER_CONST = BIT(1),
   FD_DIRTY_SHADER_TEX   = BIT(2),
   FD_DIRTY_SHADER_SSBO  = BIT(3),
   FD_DIRTY_SHADER_IMAGE = BIT(4),
};

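/* Illustrative sketch (not part of this header): per-stage state flags both
 * the stage-level bit and the (somewhat redundant) global bit, e.g. when
 * fragment textures change:
 *
 *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
 *    ctx->dirty |= FD_DIRTY_TEX;
 */
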
struct fd_context {
   struct pipe_context base;

   struct list_head node;   /* node in screen->context_list */

   /* We currently need to serialize emitting GMEM batches, because of
    * VSC state access in the context.
    *
    * In practice this lock should not be contended, since pipe_context
    * use should be single threaded.  But it is needed to protect the
    * case, with batch reordering, where a ctxB batch triggers flushing
    * a ctxA batch.
    */
   mtx_t gmem_lock;

   struct fd_device *dev;
   struct fd_screen *screen;
   struct fd_pipe *pipe;

   struct blitter_context *blitter;
   void *clear_rs_state;
   struct primconvert_context *primconvert;

   /* slab for pipe_transfer allocations: */
   struct slab_child_pool transfer_pool;

   /*
    * query related state:
    */

   /* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
   struct slab_mempool sample_pool;
   struct slab_mempool sample_period_pool;

   /* sample-providers for hw queries: */
   const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

   /* list of active queries: */
   struct list_head hw_active_queries;

   /* sample-providers for accumulating hw queries: */
   const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

   /* list of active accumulating queries: */
   struct list_head acc_active_queries;

   /* Whether we need to walk the acc_active_queries next fd_set_stage() to
    * update active queries (even if the stage doesn't change).
    */
   bool update_active_queries;

   /* Current state of pctx->set_active_query_state() (i.e. "should drawing
    * be counted against non-perfcounter queries").
    */
   bool active_queries;

   /* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to the
    * DI_PT_x value to use for the draw initiator.  There are some
    * slight differences between generations:
    */
   const uint8_t *primtypes;
   uint32_t primtype_mask;

   /* shaders used by clear, and gmem->mem blits: */
   struct fd_program_stateobj solid_prog;   // TODO move to screen?

   /* shaders used by mem->gmem blits: */
   struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS];   // TODO move to screen?
   struct fd_program_stateobj blit_z, blit_zs;

   /* Stats/counters: */
   struct {
      uint64_t prims_emitted;
      uint64_t prims_generated;
      uint64_t draw_calls;
      uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
      uint64_t staging_uploads, shadow_uploads;
      uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
   } stats;

   /* Current batch.. the rule here is that you can deref ctx->batch
    * in codepaths from pipe_context entrypoints.  But not in code-
    * paths from fd_batch_flush() (basically, the stuff that gets
    * called from GMEM code), since in those code-paths the batch
    * you care about is not necessarily the same as ctx->batch.
    */
   struct fd_batch *batch;

   /* NULL if there has been rendering since the last flush.  Otherwise
    * keeps a reference to the last fence so we can re-use it rather
    * than having to flush a no-op batch.
    */
   struct pipe_fence_handle *last_fence;

   /* track last known reset status globally and per-context to
    * determine if more resets occurred since then.  If the global reset
    * count increases, it means some other context crashed.  If the
    * per-context reset count increases, it means we crashed the
    * gpu.
    */
   uint32_t context_reset_count, global_reset_count;

   /* Are we in the process of shadowing a resource?  Used to detect
    * recursion in transfer_map, and skip unneeded synchronization.
    */
   bool in_shadow : 1;

   /* Ie. in a blit situation where we no longer care about the previous
    * framebuffer contents.  The main point is to eliminate blits from
    * fd_try_shadow_resource(), for example in the case of texture
    * upload + gen-mipmaps.
    */
   bool in_discard_blit : 1;

   /* points to either scissor or disabled_scissor depending on rast state: */
   struct pipe_scissor_state *current_scissor;

   struct pipe_scissor_state scissor;

   /* we don't have a disable/enable bit for scissor, so instead we keep
    * a disabled-scissor state which matches the entire bound framebuffer
    * and use that when scissor is not enabled.
    */
   struct pipe_scissor_state disabled_scissor;

   /* Per vsc pipe bo's (a2xx-a5xx): */
   struct fd_bo *vsc_pipe_bo[32];

   /* which state objects need to be re-emit'd: */
   enum fd_dirty_3d_state dirty;

   /* per shader-stage dirty status: */
   enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

   void *compute;
   struct pipe_blend_state *blend;
   struct pipe_rasterizer_state *rasterizer;
   struct pipe_depth_stencil_alpha_state *zsa;

   struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

   struct fd_program_stateobj prog;

   struct fd_vertex_state vtx;

   struct pipe_blend_color blend_color;
   struct pipe_stencil_ref stencil_ref;
   unsigned sample_mask;
   unsigned min_samples;
   /* local context fb state, for when ctx->batch is null: */
   struct pipe_framebuffer_state framebuffer;
   struct pipe_poly_stipple stipple;
   struct pipe_viewport_state viewport;
   struct pipe_scissor_state viewport_scissor;
   struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
   struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
   struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
   struct fd_streamout_stateobj streamout;
   struct fd_global_bindings_stateobj global_bindings;
   struct pipe_clip_state ucp;

   struct pipe_query *cond_query;
   bool cond_cond;   /* inverted rendering condition */
   uint cond_mode;

   struct pipe_debug_callback debug;

   /* Called on rebind_resource() for any per-gen cleanup required: */
   void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc);

   /* GMEM/tile handling fxns: */
   void (*emit_tile_init)(struct fd_batch *batch);
   void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
   void (*emit_tile_fini)(struct fd_batch *batch);   /* optional */

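   /* Illustrative sketch (not part of this header): the GMEM code drives
    * these hooks roughly as:
    *
    *    emit_tile_init(batch);
    *    for each tile:
    *       emit_tile_prep(batch, tile);
    *       emit_tile_mem2gmem(batch, tile);     // restore
    *       emit_tile_renderprep(batch, tile);
    *       ... replay the batch's draw commands ...
    *       emit_tile_gmem2mem(batch, tile);     // resolve
    *    if (emit_tile_fini)
    *       emit_tile_fini(batch);
    */
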
   /* optional, for GMEM bypass: */
   void (*emit_sysmem_prep)(struct fd_batch *batch);
   void (*emit_sysmem_fini)(struct fd_batch *batch);

   /* draw: */
   bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
         unsigned index_offset);
   bool (*clear)(struct fd_context *ctx, unsigned buffers,
         const union pipe_color_union *color, double depth, unsigned stencil);

   /* compute: */
   void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

   /* query: */
   struct fd_query *(*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
   void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
   void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
         struct fd_ringbuffer *ring);
   void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

   /* blitter: */
   bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);

   /* handling for barriers: */
   void (*framebuffer_barrier)(struct fd_context *ctx);

   /* logger: */
   void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
   uint64_t (*ts_to_ns)(uint64_t ts);

   struct list_head log_chunks;   /* list of flushed log chunks in fifo order */
   unsigned frame_nr;             /* frame counter (for fd_log) */

   /*
    * Common pre-cooked VBO state (used for a3xx and later):
    */

   /* for clear/gmem->mem vertices, and mem->gmem */
   struct pipe_resource *solid_vbuf;

   /* for mem->gmem tex coords: */
   struct pipe_resource *blit_texcoord_vbuf;

   /* vertex state for solid_vbuf:
    *    - solid_vbuf / 12 / R32G32B32_FLOAT
    */
   struct fd_vertex_state solid_vbuf_state;

   /* vertex state for blit_prog:
    *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
    *    - solid_vbuf / 12 / R32G32B32_FLOAT
    */
   struct fd_vertex_state blit_vbuf_state;

   /*
    * Info about the state of the previous draw, for state that comes from
    * pipe_draw_info (ie. not part of a CSO).  This allows us to skip
    * some register emits when the state doesn't change from
    * draw-to-draw.
    */
   struct {
      bool dirty;               /* last draw state unknown */
      bool primitive_restart;
      uint32_t index_start;
      uint32_t instance_start;
      uint32_t restart_index;
      uint32_t streamout_mask;
   } last;
};

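/* Illustrative sketch (not part of this header): the draw path compares
 * incoming pipe_draw_info fields against ctx->last, re-emits the affected
 * registers only on change, and then caches the new values:
 *
 *    if (ctx->last.dirty || ctx->last.index_start != info->start) {
 *       // ... re-emit index offset registers (per-gen) ...
 *       ctx->last.index_start = info->start;
 *    }
 */
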
static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
   return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
   fd_screen_assert_locked(ctx->screen);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
   fd_screen_lock(ctx->screen);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
   fd_screen_unlock(ctx->screen);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
   ctx->last.dirty = true;
   ctx->dirty = ~0;
   for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
      ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
   ctx->last.dirty = false;
   ctx->dirty = 0;
   for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
      /* don't mark compute state as clean, since it is not emitted
       * during a normal draw call.  In the places that call _all_dirty()
       * it is safe to mark compute state dirty as well, but the
       * inverse is not true.
       */
      if (i == PIPE_SHADER_COMPUTE)
         continue;
      ctx->dirty_shader[i] = 0;
   }
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
   return ctx->current_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
   return (1 << prim) & ctx->primtype_mask;
}

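/* Illustrative sketch (not part of this header): draw paths fall back to
 * u_primconvert for primitive types the hw doesn't support:
 *
 *    if (!fd_supported_prim(ctx, info->mode)) {
 *       util_primconvert_save_rasterizer_state(ctx->primconvert, ctx->rasterizer);
 *       util_primconvert_draw_vbo(ctx->primconvert, info);
 *       return;
 *    }
 */
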
static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
   if (unlikely(!ctx->batch)) {
      struct fd_batch *batch =
         fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
      util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
      ctx->batch = batch;
      fd_context_all_dirty(ctx);
   }
   return ctx->batch;
}

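/* Illustrative sketch (not part of this header): pipe_context entrypoints
 * grab the batch through this helper rather than dereferencing ctx->batch
 * directly, so a batch is created on demand after a flush:
 *
 *    struct fd_batch *batch = fd_context_batch(ctx);
 *    fd_batch_set_stage(batch, FD_STAGE_DRAW);
 *    // ... emit state/draws into the batch's rings ...
 */
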
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
   struct fd_context *ctx = batch->ctx;

   if (ctx->query_set_stage)
      ctx->query_set_stage(batch, stage);

   batch->stage = stage;
}

void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
void fd_emit_string(struct fd_ringbuffer *ring, const char *string, int len);
void fd_emit_string5(struct fd_ringbuffer *ring, const char *string, int len);

struct pipe_context * fd_context_init(struct fd_context *ctx,
      struct pipe_screen *pscreen, const uint8_t *primtypes,
      void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */