/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_slab.h"
#include "util/u_string.h"

#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
	struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
	unsigned num_textures;
	unsigned valid_textures;
	struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
	unsigned num_samplers;
	unsigned valid_samplers;
};

struct fd_program_stateobj {
	void *vp, *fp;

	/* rest only used by fd2.. split out: */
	uint8_t num_exports;
	/* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
	 * for TGSI_SEMANTIC_GENERIC.  Special vs exports (position and point-
	 * size) are not included in this
	 */
	uint8_t export_linkage[63];
};

struct fd_constbuf_stateobj {
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
	struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
	unsigned num_elements;
};

struct fd_streamout_stateobj {
	struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
	unsigned num_targets;
	/* Track offset from vtxcnt for streamout data.  This counter
	 * is just incremented by # of vertices on each draw until
	 * reset or new streamout buffer bound.
	 *
	 * When we eventually have GS, the CPU won't actually know the
	 * number of vertices per draw, so I think we'll have to do
	 * something more clever.
	 */
	unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

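/* For reference, a sketch (not literal driver code) of how these offsets are
 * expected to be maintained by the draw path, given a pipe_draw_info 'info':
 *
 *   for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
 *      ctx->streamout.offsets[i] += info->count;
 */
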
/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
	struct fd_vertex_stateobj *vtx;
	struct fd_vertexbuf_stateobj vertexbuf;
};

/* Bitmask of stages in rendering that a particular query is
 * active.  Queries will be automatically started/stopped (generating
 * additional fd_hw_sample_period's) on entrance/exit from stages that
 * are applicable to the query.
 *
 * NOTE: set the stage to NULL at end of IB to ensure no query is still
 * active.  Things aren't going to work out the way you want if a query
 * is active across IB's (or between tile IB and draw IB)
 */
enum fd_render_stage {
	FD_STAGE_NULL     = 0x01,
	FD_STAGE_DRAW     = 0x02,
	FD_STAGE_CLEAR    = 0x04,
	/* TODO before queries which include MEM2GMEM or GMEM2MEM will
	 * work we will need to call fd_hw_query_prepare() from somewhere
	 * appropriate so that queries in the tiling IB get backed with
	 * memory to write results to.
	 */
	FD_STAGE_MEM2GMEM = 0x08,
	FD_STAGE_GMEM2MEM = 0x10,
	/* used for driver internal draws (ie. util_blitter_blit()): */
	FD_STAGE_BLIT     = 0x20,
};

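/* Sketch of the intended usage (illustrative only; the actual stage-switch
 * helper lives in the hw-query code, not in this header):
 *
 *   set_stage(ctx, ring, FD_STAGE_DRAW);   // starts applicable queries
 *   ... emit draw commands ...
 *   set_stage(ctx, ring, FD_STAGE_NULL);   // at end of IB, stops them
 *
 * 'set_stage' here is just a placeholder name for whatever helper transitions
 * ctx->stage and emits the extra sample periods.
 */
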
#define MAX_HW_SAMPLE_PROVIDERS 4
struct fd_hw_sample_provider;
struct fd_hw_sample;

struct fd_context {
	struct pipe_context base;

	struct fd_device *dev;
	struct fd_screen *screen;

	struct blitter_context *blitter;
	struct primconvert_context *primconvert;

	/* slab for pipe_transfer allocations: */
	struct util_slab_mempool transfer_pool;

	/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
	struct util_slab_mempool sample_pool;
	struct util_slab_mempool sample_period_pool;

	/* next sample offset.. incremented for each sample in the batch/
	 * submit, reset to zero on next submit.
	 */
	uint32_t next_sample_offset;

	/* sample-providers for hw queries: */
	const struct fd_hw_sample_provider *sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* cached samples (in case multiple queries need to reference
	 * the same sample snapshot)
	 */
	struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];

	/* which sample providers were active in the current batch: */
	uint32_t active_providers;

	/* tracking for current stage, to know when to start/stop
	 * any active queries:
	 */
	enum fd_render_stage stage;

	/* list of active queries: */
	struct list_head active_queries;

	/* list of queries that are not active, but were active in the
	 * current submit:
	 */
	struct list_head current_queries;

	/* current query result bo and tile stride: */
	struct fd_bo *query_bo;
	uint32_t query_tile_stride;

	/* list of resources used by currently-unsubmitted renders */
	struct list_head used_resources;

	/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
	 * DI_PT_x value to use for draw initiator.  There are some
	 * slight differences between generations:
	 */
	const uint8_t *primtypes;
	uint32_t primtype_mask;

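	/* For example (sketch only), a draw path would be expected to use these
	 * roughly as follows, falling back to u_primconvert for prims the hw
	 * can't handle natively:
	 *
	 *   if (!fd_supported_prim(ctx, info->mode))
	 *      return util_primconvert_draw_vbo(ctx->primconvert, info);
	 *   uint8_t primtype = ctx->primtypes[info->mode];
	 *
	 * (fd_supported_prim() is defined at the bottom of this header)
	 */
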
	/* shaders used by clear, and gmem->mem blits: */
	struct fd_program_stateobj solid_prog; // TODO move to screen?

	/* shaders used by mem->gmem blits: */
	struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
	struct fd_program_stateobj blit_z, blit_zs;

	/* do we need to mem2gmem before rendering.  We don't, if for example,
	 * there was a glClear() that invalidated the entire previous buffer
	 * contents.  Keep track of which buffer(s) are cleared, or need
	 * restore.  Masks of PIPE_CLEAR_*
	 *
	 * The 'cleared' bits will be set for buffers which are *entirely*
	 * cleared, and 'partial_cleared' bits will be set if you must
	 * check cleared_scissor.
	 */
	enum {
		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
		FD_BUFFER_COLOR   = PIPE_CLEAR_COLOR,
		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
	} cleared, partial_cleared, restore, resolve;

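	/* Illustrative sketch of how these masks interact (not literal code):
	 * anything a draw writes that was not already entirely cleared needs to
	 * be restored from system memory, roughly:
	 *
	 *   ctx->restore |= buffers_written & ~ctx->cleared;
	 *
	 * where 'buffers_written' is a hypothetical PIPE_CLEAR_*-style mask of
	 * the buffers the draw writes.
	 */
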
	/* To decide whether to render to system memory, keep track of the
	 * number of draws, and whether any of them require multisample,
	 * depth_test (or depth write), stencil_test, blending, and
	 * color_logic_op (since those functions are disabled when by-
	 * passing GMEM).
	 */
	enum {
		FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
		FD_GMEM_DEPTH_ENABLED        = 0x02,
		FD_GMEM_STENCIL_ENABLED      = 0x04,

		FD_GMEM_MSAA_ENABLED         = 0x08,
		FD_GMEM_BLEND_ENABLED        = 0x10,
		FD_GMEM_LOGICOP_ENABLED      = 0x20,
	} gmem_reason;
	unsigned num_draws;   /* number of draws in current batch */

	/* stats/counters: */
	uint64_t prims_emitted;
	uint64_t prims_generated;

	uint64_t batch_total, batch_sysmem, batch_gmem, batch_restore;

	/* we can't really sanely deal with wraparound point in ringbuffer
	 * and because of the way tiling works we can't really flush at
	 * arbitrary points (without a big performance hit).  When we get
	 * too close to the end of the current ringbuffer, cycle to the next
	 * one (and wait for pending rendering from next rb to complete).
	 * We want the # of ringbuffers to be high enough that we don't
	 * normally have to wait before resetting to the start of the next
	 * rb.
	 */
	struct fd_ringbuffer *rings[8];

	/* NOTE: currently using a single ringbuffer for both draw and
	 * tiling commands, so we need to make sure we leave enough
	 * room at the end to append the tiling commands when we flush.
	 * 0x7000 dwords should be a couple times more than we ever need
	 * so should be a nice conservative threshold.
	 */
#define FD_TILING_COMMANDS_DWORDS 0x7000

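	/* A flush/draw path might apply this threshold roughly as follows
	 * (sketch only; 'dwords_left' is a placeholder for however much space
	 * remains in ctx->ring):
	 *
	 *   if (dwords_left < FD_TILING_COMMANDS_DWORDS)
	 *      ... flush and cycle to the next entry in ctx->rings[] ...
	 */
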
	/* normal draw/clear cmds: */
	struct fd_ringbuffer *ring;
	struct fd_ringmarker *draw_start, *draw_end;

	/* binning pass draw/clear cmds: */
	struct fd_ringbuffer *binning_ring;
	struct fd_ringmarker *binning_start, *binning_end;

	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
	 * to update via RMW:
	 */
	bool needs_wfi;

	/* Do we need to re-emit RB_FRAME_BUFFER_DIMENSION?  At least on a3xx
	 * it is not a banked context register, so it needs a WFI to update.
	 * Keep track if it has actually changed, to avoid unneeded WFI.
	 */
	bool needs_rb_fbd;

	/* Keep track of DRAW initiators that need to be patched up depending
	 * on whether or not we are using binning:
	 */
	struct util_dynarray draw_patches;

	struct pipe_scissor_state scissor;

	/* we don't have a disable/enable bit for scissor, so instead we keep
	 * a disabled-scissor state which matches the entire bound framebuffer
	 * and use that when scissor is not enabled.
	 */
	struct pipe_scissor_state disabled_scissor;

	/* Track the maximal bounds of the scissor of all the draws within a
	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
	 * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
	 */
	struct pipe_scissor_state max_scissor;

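	/* e.g. the draw path would be expected to grow this per draw, roughly
	 * (sketch only, using the gallium MIN2/MAX2 helpers):
	 *
	 *   ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
	 *   ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
	 *   ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
	 *   ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
	 */
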
	/* Track the cleared scissor for color/depth/stencil, so we know
	 * which, if any, tiles need to be restored (mem2gmem).  Only valid
	 * if the corresponding bit in ctx->cleared is set.
	 */
	struct {
		struct pipe_scissor_state color, depth, stencil;
	} cleared_scissor;

	/* Current gmem/tiling configuration.. gets updated on render_tiles()
	 * if out of date with current maximal-scissor/cpp:
	 */
	struct fd_gmem_stateobj gmem;
	struct fd_vsc_pipe      pipe[8];
	struct fd_tile          tile[512];

	/* which state objects need to be re-emit'd: */
	enum {
		FD_DIRTY_BLEND       = (1 << 0),
		FD_DIRTY_RASTERIZER  = (1 << 1),
		FD_DIRTY_ZSA         = (1 << 2),
		FD_DIRTY_FRAGTEX     = (1 << 3),
		FD_DIRTY_VERTTEX     = (1 << 4),
		FD_DIRTY_TEXSTATE    = (1 << 5),

		FD_SHADER_DIRTY_VP   = (1 << 6),
		FD_SHADER_DIRTY_FP   = (1 << 7),
		/* skip geom/tcs/tes/compute */
		FD_DIRTY_PROG        = FD_SHADER_DIRTY_FP | FD_SHADER_DIRTY_VP,

		FD_DIRTY_BLEND_COLOR = (1 << 12),
		FD_DIRTY_STENCIL_REF = (1 << 13),
		FD_DIRTY_SAMPLE_MASK = (1 << 14),
		FD_DIRTY_FRAMEBUFFER = (1 << 15),
		FD_DIRTY_STIPPLE     = (1 << 16),
		FD_DIRTY_VIEWPORT    = (1 << 17),
		FD_DIRTY_CONSTBUF    = (1 << 18),
		FD_DIRTY_VTXSTATE    = (1 << 19),
		FD_DIRTY_VTXBUF      = (1 << 20),
		FD_DIRTY_INDEXBUF    = (1 << 21),
		FD_DIRTY_SCISSOR     = (1 << 22),
		FD_DIRTY_STREAMOUT   = (1 << 23),
		FD_DIRTY_UCP         = (1 << 24),
		FD_DIRTY_BLEND_DUAL  = (1 << 25),
	} dirty;

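	/* Sketch of how the dirty bits are intended to flow (illustrative, not
	 * literal code): state-setter callbacks OR in the relevant bit, and the
	 * per-generation emit code tests and then clears the accumulated mask:
	 *
	 *   ctx->dirty |= FD_DIRTY_BLEND;       // in the set_blend_state hook
	 *   ...
	 *   if (ctx->dirty & FD_DIRTY_BLEND)
	 *      ... re-emit blend registers ...
	 *   ctx->dirty = 0;                     // once all state is emitted
	 */
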
	struct pipe_blend_state *blend;
	struct pipe_rasterizer_state *rasterizer;
	struct pipe_depth_stencil_alpha_state *zsa;

	struct fd_texture_stateobj verttex, fragtex;

	struct fd_program_stateobj prog;

	struct fd_vertex_state vtx;

	struct pipe_blend_color blend_color;
	struct pipe_stencil_ref stencil_ref;
	unsigned sample_mask;
	struct pipe_framebuffer_state framebuffer;
	struct pipe_poly_stipple stipple;
	struct pipe_viewport_state viewport;
	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
	struct pipe_index_buffer indexbuf;
	struct fd_streamout_stateobj streamout;
	struct pipe_clip_state ucp;

	struct pipe_query *cond_query;
	bool cond_cond; /* inverted rendering condition */

	struct pipe_debug_callback debug;

	/* GMEM/tile handling fxns: */
	void (*emit_tile_init)(struct fd_context *ctx);
	void (*emit_tile_prep)(struct fd_context *ctx, struct fd_tile *tile);
	void (*emit_tile_mem2gmem)(struct fd_context *ctx, struct fd_tile *tile);
	void (*emit_tile_renderprep)(struct fd_context *ctx, struct fd_tile *tile);
	void (*emit_tile_gmem2mem)(struct fd_context *ctx, struct fd_tile *tile);

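	/* Rough sketch of the call order expected from the gmem code
	 * (fd_gmem_render_tiles()), for illustration only:
	 *
	 *   ctx->emit_tile_init(ctx);
	 *   for each tile:
	 *      ctx->emit_tile_prep(ctx, tile);
	 *      ctx->emit_tile_mem2gmem(ctx, tile);     // restore, if needed
	 *      ctx->emit_tile_renderprep(ctx, tile);
	 *      ... IB into the draw commands ...
	 *      ctx->emit_tile_gmem2mem(ctx, tile);     // resolve to memory
	 */
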
	/* optional, for GMEM bypass: */
	void (*emit_sysmem_prep)(struct fd_context *ctx);

	bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
	void (*clear)(struct fd_context *ctx, unsigned buffers,
			const union pipe_color_union *color, double depth, unsigned stencil);

	/* constant emit:  (note currently not used/needed for a2xx) */
	void (*emit_const)(struct fd_ringbuffer *ring, enum shader_t type,
			uint32_t regid, uint32_t offset, uint32_t sizedwords,
			const uint32_t *dwords, struct pipe_resource *prsc);
	/* emit bo addresses as constant: */
	void (*emit_const_bo)(struct fd_ringbuffer *ring, enum shader_t type, boolean write,
			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);

	/* indirect-branch emit: */
	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringmarker *start,
			struct fd_ringmarker *end);
};

static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
	if (ctx->rasterizer && ctx->rasterizer->scissor)
		return &ctx->scissor;
	return &ctx->disabled_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
	return (1 << prim) & ctx->primtype_mask;
}

static inline void
fd_reset_wfi(struct fd_context *ctx)
{
	ctx->needs_wfi = true;
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
static inline void
fd_wfi(struct fd_context *ctx, struct fd_ringbuffer *ring)
{
	if (ctx->needs_wfi) {
		OUT_WFI(ring);
		ctx->needs_wfi = false;
	}
}

/* emit a CP_EVENT_WRITE:
 */
static inline void
fd_event_write(struct fd_context *ctx, struct fd_ringbuffer *ring,
		enum vgt_event_type evt)
{
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, evt);
	fd_reset_wfi(ctx);
}

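/* Example usage (illustrative only; CACHE_FLUSH_TS is just one of the
 * vgt_event_type values and may not be what a given caller wants):
 *
 *   fd_wfi(ctx, ring);
 *   fd_event_write(ctx, ring, CACHE_FLUSH_TS);
 */
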
struct pipe_context * fd_context_init(struct fd_context *ctx,
		struct pipe_screen *pscreen, const uint8_t *primtypes,
		void *priv);

void fd_context_render(struct pipe_context *pctx);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */