/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
	struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
	unsigned num_textures;
	unsigned valid_textures;
	struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
	unsigned num_samplers;
	unsigned valid_samplers;
};

struct fd_program_stateobj {
	void *vp, *fp;

	/* rest only used by fd2.. split out: */
	uint8_t num_exports;
	/* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
	 * for TGSI_SEMANTIC_GENERIC.  Special vs exports (position and point-
	 * size) are not included in this.
	 */
	uint8_t export_linkage[63];
};

struct fd_constbuf_stateobj {
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
	struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderimg_stateobj {
	struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
	uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	unsigned count;
	uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
	struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
	unsigned num_elements;
};

struct fd_streamout_stateobj {
	struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
	unsigned num_targets;
	/* Track offset from vtxcnt for streamout data.  This counter
	 * is just incremented by # of vertices on each draw until
	 * reset or new streamout buffer bound.
	 *
	 * When we eventually have GS, the CPU won't actually know the
	 * number of vertices per draw, so I think we'll have to do
	 * something more clever.
	 */
	unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

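/* Illustrative sketch (not code from this header): with streamout active,
 * the draw path would bump each bound target's offset by the draw's vertex
 * count, roughly:
 *
 *    for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
 *       ctx->streamout.offsets[i] += info->count;
 *
 * and rebinding streamout targets resets offsets[] to the new start values.
 */
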
/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
	struct fd_vertex_stateobj *vtx;
	struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
	FD_DIRTY_BLEND       = BIT(0),
	FD_DIRTY_RASTERIZER  = BIT(1),
	FD_DIRTY_ZSA         = BIT(2),
	FD_DIRTY_BLEND_COLOR = BIT(3),
	FD_DIRTY_STENCIL_REF = BIT(4),
	FD_DIRTY_SAMPLE_MASK = BIT(5),
	FD_DIRTY_FRAMEBUFFER = BIT(6),
	FD_DIRTY_STIPPLE     = BIT(7),
	FD_DIRTY_VIEWPORT    = BIT(8),
	FD_DIRTY_VTXSTATE    = BIT(9),
	FD_DIRTY_VTXBUF      = BIT(10),

	FD_DIRTY_SCISSOR     = BIT(12),
	FD_DIRTY_STREAMOUT   = BIT(13),
	FD_DIRTY_UCP         = BIT(14),
	FD_DIRTY_BLEND_DUAL  = BIT(15),

	/* These are a bit redundant with fd_dirty_shader_state, and possibly
	 * should be removed.  (But OTOH kinda convenient in some places)
	 */
	FD_DIRTY_PROG        = BIT(16),
	FD_DIRTY_CONST       = BIT(17),
	FD_DIRTY_TEX         = BIT(18),

	/* only used by a2xx.. possibly can be removed.. */
	FD_DIRTY_TEXSTATE    = BIT(19),
};

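/* Usage sketch (illustrative only, not a declaration from this header): a
 * pipe_context CSO bind entrypoint typically just stores the new state and
 * flags the matching group dirty, e.g.:
 *
 *    static void
 *    fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
 *    {
 *       struct fd_context *ctx = fd_context(pctx);
 *       ctx->blend = hwcso;
 *       ctx->dirty |= FD_DIRTY_BLEND;
 *    }
 *
 * The draw path then re-emits only the state groups whose bits are set and
 * clears ctx->dirty once emitted.
 */
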
/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
	FD_DIRTY_SHADER_PROG  = BIT(0),
	FD_DIRTY_SHADER_CONST = BIT(1),
	FD_DIRTY_SHADER_TEX   = BIT(2),
	FD_DIRTY_SHADER_SSBO  = BIT(3),
	FD_DIRTY_SHADER_IMAGE = BIT(4),
};

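/* Per-stage bits are tracked alongside the global ones; a state change that
 * affects a single stage would (sketch) set both, e.g.:
 *
 *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
 *    ctx->dirty |= FD_DIRTY_TEX;
 */
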
struct fd_context {
	struct pipe_context base;

	struct fd_device *dev;
	struct fd_screen *screen;
	struct fd_pipe *pipe;

	struct util_queue flush_queue;

	struct blitter_context *blitter;
	void *clear_rs_state;
	struct primconvert_context *primconvert;

	/* slab for pipe_transfer allocations: */
	struct slab_child_pool transfer_pool;

	/* query related state: */

	/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
	struct slab_mempool sample_pool;
	struct slab_mempool sample_period_pool;

	/* sample-providers for hw queries: */
	const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active queries: */
	struct list_head hw_active_queries;

	/* sample-providers for accumulating hw queries: */
	const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active accumulating queries: */
	struct list_head acc_active_queries;

	/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to the
	 * DI_PT_x value to use for the draw initiator.  There are some
	 * slight differences between generations:
	 */
	const uint8_t *primtypes;
	uint32_t primtype_mask;
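	/* e.g. the generation-specific draw code would look up (sketch):
	 *
	 *    uint8_t hw_prim = ctx->primtypes[info->mode];
	 *
	 * while primtype_mask has a bit set for each PIPE_PRIM_x the
	 * generation supports natively (see fd_supported_prim() below).
	 */
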
	/* shaders used by clear, and gmem->mem blits: */
	struct fd_program_stateobj solid_prog; // TODO move to screen?

	/* shaders used by mem->gmem blits: */
	struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
	struct fd_program_stateobj blit_z, blit_zs;

	/* stats/counters: */
	struct {
		uint64_t prims_emitted;
		uint64_t prims_generated;
		uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
		uint64_t staging_uploads, shadow_uploads;
	} stats;

	/* Current batch.. the rule here is that you can deref ctx->batch
	 * in codepaths from pipe_context entrypoints.  But not in code-
	 * paths from fd_batch_flush() (basically, the stuff that gets
	 * called from GMEM code), since in those code-paths the batch
	 * you care about is not necessarily the same as ctx->batch.
	 */
	struct fd_batch *batch;
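	/* I.e. it is fine in fd_draw_vbo()/fd_clear() style entrypoints, but the
	 * per-tile GMEM callbacks below receive the batch they operate on as an
	 * explicit argument and should use that instead of ctx->batch.
	 */
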
	/* Are we in process of shadowing a resource? Used to detect recursion
	 * in transfer_map, and skip unneeded synchronization.
	 */
	bool in_shadow : 1;

	/* I.e. in a blit situation where we no longer care about previous framebuffer
	 * contents.  Main point is to eliminate blits from fd_try_shadow_resource().
	 * For example, in case of texture upload + gen-mipmaps.
	 */
	bool in_blit : 1;

	struct pipe_scissor_state scissor;

	/* we don't have a disable/enable bit for scissor, so instead we keep
	 * a disabled-scissor state which matches the entire bound framebuffer
	 * and use that when scissor is not enabled.
	 */
	struct pipe_scissor_state disabled_scissor;

	/* Current gmem/tiling configuration.. gets updated on render_tiles()
	 * if out of date with current maximal-scissor/cpp:
	 *
	 * (NOTE: this is kind of related to the batch, but moving it there
	 * means we'd always have to recalc tiles every batch)
	 */
	struct fd_gmem_stateobj gmem;
	struct fd_vsc_pipe vsc_pipe[16];
	struct fd_tile tile[512];

	/* which state objects need to be re-emit'd: */
	enum fd_dirty_3d_state dirty;

	/* per shader-stage dirty status: */
	enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

	struct pipe_blend_state *blend;
	struct pipe_rasterizer_state *rasterizer;
	struct pipe_depth_stencil_alpha_state *zsa;

	struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

	struct fd_program_stateobj prog;

	struct fd_vertex_state vtx;

	struct pipe_blend_color blend_color;
	struct pipe_stencil_ref stencil_ref;
	unsigned sample_mask;
	struct pipe_poly_stipple stipple;
	struct pipe_viewport_state viewport;
	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
	struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
	struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
	struct fd_streamout_stateobj streamout;
	struct pipe_clip_state ucp;

	struct pipe_query *cond_query;
	bool cond_cond; /* inverted rendering condition */

	struct pipe_debug_callback debug;

	/* GMEM/tile handling fxns: */
	void (*emit_tile_init)(struct fd_batch *batch);
	void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_fini)(struct fd_batch *batch);   /* optional */

	/* optional, for GMEM bypass: */
	void (*emit_sysmem_prep)(struct fd_batch *batch);
	void (*emit_sysmem_fini)(struct fd_batch *batch);
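	/* These hooks form the per-generation backend and are filled in at
	 * context-create time.  A sketch (hypothetical a3xx names, for
	 * illustration only):
	 *
	 *    ctx->emit_tile_init     = fd3_emit_tile_init;
	 *    ctx->emit_tile_mem2gmem = fd3_emit_tile_mem2gmem;
	 *    ctx->emit_tile_gmem2mem = fd3_emit_tile_gmem2mem;
	 *
	 * The common GMEM code then walks the tile list and invokes them for
	 * each tile.
	 */
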
	bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
			unsigned index_offset);
	bool (*clear)(struct fd_context *ctx, unsigned buffers,
			const union pipe_color_union *color, double depth, unsigned stencil);

	void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

	/* constant emit:  (note currently not used/needed for a2xx) */
	void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
			uint32_t regid, uint32_t offset, uint32_t sizedwords,
			const uint32_t *dwords, struct pipe_resource *prsc);
	/* emit bo addresses as constant: */
	void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
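	/* The shared state-emit code uses these to upload user constants either
	 * from a malloc'd user buffer or from a backing pipe_resource; a rough
	 * (illustrative) call might look like:
	 *
	 *    ctx->emit_const(ring, SHADER_VERTEX, regid, 0, sizedwords,
	 *                    cb->user_buffer, cb->buffer);
	 */
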
	/* indirect-branch emit: */
	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);

	struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
	void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
	void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
			struct fd_ringbuffer *ring);
	void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

	void (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);

	/*
	 * Common pre-cooked VBO state (used for a3xx and later):
	 */

	/* for clear/gmem->mem vertices, and mem->gmem */
	struct pipe_resource *solid_vbuf;

	/* for mem->gmem tex coords: */
	struct pipe_resource *blit_texcoord_vbuf;

	/* vertex state for solid_vbuf:
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state solid_vbuf_state;

	/* vertex state for blit_prog:
	 *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state blit_vbuf_state;
};

static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
	pipe_mutex_assert_locked(ctx->screen->lock);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
	mtx_lock(&ctx->screen->lock);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
	mtx_unlock(&ctx->screen->lock);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
	ctx->dirty = ~0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
		ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
	ctx->dirty = 0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
		/* don't mark compute state as clean, since it is not emitted
		 * during a normal draw call.  In the places that call _all_dirty(),
		 * it is safe to mark compute state dirty as well, but the
		 * inverse is not true.
		 */
		if (i == PIPE_SHADER_COMPUTE)
			continue;
		ctx->dirty_shader[i] = 0;
	}
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
	if (ctx->rasterizer && ctx->rasterizer->scissor)
		return &ctx->scissor;
	return &ctx->disabled_scissor;
}
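/* Scissor-dependent code goes through this helper rather than reading
 * ctx->scissor directly (sketch, for illustration):
 *
 *    struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
 *
 * so it transparently picks up the full-framebuffer "disabled" scissor when
 * the rasterizer scissor test is off.
 */
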
static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
	return (1 << prim) & ctx->primtype_mask;
}
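/* Hedged sketch of how the draw path might use this: primitives the
 * generation can't draw natively get converted via ctx->primconvert, e.g.:
 *
 *    if (!fd_supported_prim(ctx, info->mode)) {
 *       util_primconvert_draw_vbo(ctx->primconvert, info);
 *       return;
 *    }
 */
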
static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
	struct fd_context *ctx = batch->ctx;

	/* special case: internal blits (like mipmap level generation)
	 * go through the normal draw path (via util_blitter_blit()).. but
	 * we need to ignore the FD_STAGE_DRAW which will be set, so we
	 * don't enable queries which should be paused during internal
	 * blits:
	 */
	if ((batch->stage == FD_STAGE_BLIT) &&
			(stage != FD_STAGE_NULL))
		return;

	if (ctx->query_set_stage)
		ctx->query_set_stage(batch, stage);

	batch->stage = stage;
}
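/* For illustration: the draw path would do (sketch)
 *
 *    fd_batch_set_stage(batch, FD_STAGE_DRAW);
 *
 * before emitting draws, and the stage drops back to FD_STAGE_NULL once the
 * batch goes idle, which is what lets query_set_stage() pause/resume hw
 * queries at the right points.
 */
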
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

struct pipe_context * fd_context_init(struct fd_context *ctx,
		struct pipe_screen *pscreen, const uint8_t *primtypes,
		void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */