freedreno: rework blit API
[mesa.git] / src/gallium/drivers/freedreno/freedreno_context.h
/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_CONTEXT_H_
#define FREEDRENO_CONTEXT_H_

#include "pipe/p_context.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/slab.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_screen.h"
#include "freedreno_gmem.h"
#include "freedreno_util.h"

#define BORDER_COLOR_UPLOAD_SIZE (2 * PIPE_MAX_SAMPLERS * BORDERCOLOR_SIZE)

struct fd_vertex_stateobj;

struct fd_texture_stateobj {
	struct pipe_sampler_view *textures[PIPE_MAX_SAMPLERS];
	unsigned num_textures;
	unsigned valid_textures;
	struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
	unsigned num_samplers;
	unsigned valid_samplers;
	/* number of samples per sampler, 2 bits per sampler: */
	uint32_t samples;
};
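
/* Illustrative sketch (assumed semantics, not lifted from the driver):
 * with two bits per sampler, packing/unpacking the sample count for
 * sampler 'i' would look roughly like:
 *
 *    tex->samples |= (fd_msaa_samples(nr_samples) & 0x3) << (i * 2);
 *    unsigned samples = (tex->samples >> (i * 2)) & 0x3;
 *
 * (that the field holds an fd_msaa_samples()-style 2-bit enum is an
 * assumption here)
 */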

struct fd_program_stateobj {
	void *vp, *fp;

	/* rest only used by fd2.. split out: */
	uint8_t num_exports;
	/* Indexed by semantic name or TGSI_SEMANTIC_COUNT + semantic index
	 * for TGSI_SEMANTIC_GENERIC.  Special vs exports (position and point-
	 * size) are not included in this.
	 */
	uint8_t export_linkage[63];
};

struct fd_constbuf_stateobj {
	struct pipe_constant_buffer cb[PIPE_MAX_CONSTANT_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderbuf_stateobj {
	struct pipe_shader_buffer sb[PIPE_MAX_SHADER_BUFFERS];
	uint32_t enabled_mask;
};

struct fd_shaderimg_stateobj {
	struct pipe_image_view si[PIPE_MAX_SHADER_IMAGES];
	uint32_t enabled_mask;
};

struct fd_vertexbuf_stateobj {
	struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
	unsigned count;
	uint32_t enabled_mask;
};

struct fd_vertex_stateobj {
	struct pipe_vertex_element pipe[PIPE_MAX_ATTRIBS];
	unsigned num_elements;
};

struct fd_streamout_stateobj {
	struct pipe_stream_output_target *targets[PIPE_MAX_SO_BUFFERS];
	unsigned num_targets;
	/* Track offset from vtxcnt for streamout data.  This counter is
	 * just incremented by # of vertices on each draw until it is reset
	 * or a new streamout buffer is bound.
	 *
	 * When we eventually have GS, the CPU won't actually know the
	 * number of vertices per draw, so I think we'll have to do
	 * something more clever.
	 */
	unsigned offsets[PIPE_MAX_SO_BUFFERS];
};

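/* A minimal sketch (assumed, based on the comment above) of how the draw
 * path would advance these counters:
 *
 *    for (unsigned i = 0; i < ctx->streamout.num_targets; i++)
 *       ctx->streamout.offsets[i] += info->count;
 */
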
#define MAX_GLOBAL_BUFFERS 16
struct fd_global_bindings_stateobj {
	struct pipe_resource *buf[MAX_GLOBAL_BUFFERS];
	uint32_t enabled_mask;
};

/* group together the vertex and vertexbuf state.. for ease of passing
 * around, and because various internal operations (gmem<->mem, etc)
 * need their own vertex state:
 */
struct fd_vertex_state {
	struct fd_vertex_stateobj *vtx;
	struct fd_vertexbuf_stateobj vertexbuf;
};

/* global 3d pipeline dirty state: */
enum fd_dirty_3d_state {
	FD_DIRTY_BLEND = BIT(0),
	FD_DIRTY_RASTERIZER = BIT(1),
	FD_DIRTY_ZSA = BIT(2),
	FD_DIRTY_BLEND_COLOR = BIT(3),
	FD_DIRTY_STENCIL_REF = BIT(4),
	FD_DIRTY_SAMPLE_MASK = BIT(5),
	FD_DIRTY_FRAMEBUFFER = BIT(6),
	FD_DIRTY_STIPPLE = BIT(7),
	FD_DIRTY_VIEWPORT = BIT(8),
	FD_DIRTY_VTXSTATE = BIT(9),
	FD_DIRTY_VTXBUF = BIT(10),

	FD_DIRTY_SCISSOR = BIT(12),
	FD_DIRTY_STREAMOUT = BIT(13),
	FD_DIRTY_UCP = BIT(14),
	FD_DIRTY_BLEND_DUAL = BIT(15),

	/* These are a bit redundant with fd_dirty_shader_state, and possibly
	 * should be removed.  (But OTOH kinda convenient in some places)
	 */
	FD_DIRTY_PROG = BIT(16),
	FD_DIRTY_CONST = BIT(17),
	FD_DIRTY_TEX = BIT(18),

	/* only used by a2xx.. possibly can be removed.. */
	FD_DIRTY_TEXSTATE = BIT(19),
};

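/* Sketch of the usual usage pattern (emit_blend_state() is a hypothetical
 * helper, not part of this header): state-change entrypoints set bits, and
 * draw-time emit tests and then clears them:
 *
 *    ctx->dirty |= FD_DIRTY_BLEND;
 *    ...
 *    if (ctx->dirty & FD_DIRTY_BLEND)
 *       emit_blend_state(ctx);
 */
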
/* per shader-stage dirty state: */
enum fd_dirty_shader_state {
	FD_DIRTY_SHADER_PROG = BIT(0),
	FD_DIRTY_SHADER_CONST = BIT(1),
	FD_DIRTY_SHADER_TEX = BIT(2),
	FD_DIRTY_SHADER_SSBO = BIT(3),
	FD_DIRTY_SHADER_IMAGE = BIT(4),
};

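/* e.g. a sketch of flagging per-stage state alongside the global bit
 * (the exact call sites vary per state type):
 *
 *    ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_TEX;
 *    ctx->dirty |= FD_DIRTY_TEX;
 */
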
struct fd_context {
	struct pipe_context base;

	struct fd_device *dev;
	struct fd_screen *screen;
	struct fd_pipe *pipe;

	struct util_queue flush_queue;

	struct blitter_context *blitter;
	void *clear_rs_state;
	struct primconvert_context *primconvert;

	/* slab for pipe_transfer allocations: */
	struct slab_child_pool transfer_pool;

	/**
	 * query related state:
	 */
	/*@{*/
	/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
	struct slab_mempool sample_pool;
	struct slab_mempool sample_period_pool;

	/* sample-providers for hw queries: */
	const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active queries: */
	struct list_head hw_active_queries;

	/* sample-providers for accumulating hw queries: */
	const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];

	/* list of active accumulating queries: */
	struct list_head acc_active_queries;
	/*@}*/

	/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
	 * DI_PT_x value to use for draw initiator.  There are some
	 * slight differences between generations:
	 */
	const uint8_t *primtypes;
	uint32_t primtype_mask;

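	/* e.g. a sketch of how the draw path would use these (assuming the
	 * primconvert fallback for unsupported prims):
	 *
	 *    if (!fd_supported_prim(ctx, info->mode)) {
	 *       util_primconvert_draw_vbo(ctx->primconvert, info);
	 *       return;
	 *    }
	 *    uint8_t hw_prim = ctx->primtypes[info->mode];
	 */
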
	/* shaders used by clear, and gmem->mem blits: */
	struct fd_program_stateobj solid_prog; // TODO move to screen?

	/* shaders used by mem->gmem blits: */
	struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
	struct fd_program_stateobj blit_z, blit_zs;

	/* Stats/counters:
	 */
	struct {
		uint64_t prims_emitted;
		uint64_t prims_generated;
		uint64_t draw_calls;
		uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
		uint64_t staging_uploads, shadow_uploads;
		uint64_t vs_regs, fs_regs;
	} stats;

	/* Current batch.. the rule here is that you can deref ctx->batch
	 * in codepaths from pipe_context entrypoints.  But not in code-
	 * paths from fd_batch_flush() (basically, the stuff that gets
	 * called from GMEM code), since in those code-paths the batch
	 * you care about is not necessarily the same as ctx->batch.
	 */
	struct fd_batch *batch;
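
	/* e.g. pipe_context entrypoints would typically go through the
	 * fd_context_batch() accessor below, which creates a batch on
	 * demand, rather than deref'ing ctx->batch directly:
	 *
	 *    struct fd_batch *batch = fd_context_batch(ctx);
	 */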

	/* NULL if there has been rendering since last flush.  Otherwise
	 * keeps a reference to the last fence so we can re-use it rather
	 * than having to flush a no-op batch.
	 */
	struct pipe_fence_handle *last_fence;

	/* Are we in the process of shadowing a resource?  Used to detect
	 * recursion in transfer_map, and skip unneeded synchronization.
	 */
	bool in_shadow : 1;

	/* I.e. in a blit situation where we no longer care about previous
	 * framebuffer contents.  The main point is to eliminate blits from
	 * fd_try_shadow_resource(), for example in the case of texture
	 * upload + gen-mipmaps.
	 */
	bool in_blit : 1;

	struct pipe_scissor_state scissor;

	/* we don't have a disable/enable bit for scissor, so instead we keep
	 * a disabled-scissor state which matches the entire bound framebuffer
	 * and use that when scissor is not enabled.
	 */
	struct pipe_scissor_state disabled_scissor;

	/* Current gmem/tiling configuration.. gets updated on render_tiles()
	 * if out of date with current maximal-scissor/cpp:
	 *
	 * (NOTE: this is kind of related to the batch, but moving it there
	 * means we'd always have to recalc tiles every batch)
	 */
	struct fd_gmem_stateobj gmem;
	struct fd_vsc_pipe vsc_pipe[32];
	struct fd_tile tile[512];

	/* which state objects need to be re-emit'd: */
	enum fd_dirty_3d_state dirty;

	/* per shader-stage dirty status: */
	enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];

	void *compute;
	struct pipe_blend_state *blend;
	struct pipe_rasterizer_state *rasterizer;
	struct pipe_depth_stencil_alpha_state *zsa;

	struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];

	struct fd_program_stateobj prog;

	struct fd_vertex_state vtx;

	struct pipe_blend_color blend_color;
	struct pipe_stencil_ref stencil_ref;
	unsigned sample_mask;
	/* local context fb state, for when ctx->batch is null: */
	struct pipe_framebuffer_state framebuffer;
	struct pipe_poly_stipple stipple;
	struct pipe_viewport_state viewport;
	struct pipe_scissor_state viewport_scissor;
	struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
	struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
	struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
	struct fd_streamout_stateobj streamout;
	struct fd_global_bindings_stateobj global_bindings;
	struct pipe_clip_state ucp;

	struct pipe_query *cond_query;
	bool cond_cond; /* inverted rendering condition */
	uint cond_mode;

	struct pipe_debug_callback debug;

	/* GMEM/tile handling fxns: */
	void (*emit_tile_init)(struct fd_batch *batch);
	void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
	void (*emit_tile_fini)(struct fd_batch *batch); /* optional */

	/* optional, for GMEM bypass: */
	void (*emit_sysmem_prep)(struct fd_batch *batch);
	void (*emit_sysmem_fini)(struct fd_batch *batch);

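	/* A sketch of how these hooks get filled in by per-generation setup
	 * (function names assumed for illustration):
	 *
	 *    ctx->emit_tile_init = fd4_emit_tile_init;
	 *    ctx->emit_tile_gmem2mem = fd4_emit_tile_gmem2mem;
	 */
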
	/* draw: */
	bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
			unsigned index_offset);
	bool (*clear)(struct fd_context *ctx, unsigned buffers,
			const union pipe_color_union *color, double depth, unsigned stencil);

	/* compute: */
	void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);

	/* constant emit:  (note currently not used/needed for a2xx) */
	void (*emit_const)(struct fd_ringbuffer *ring, gl_shader_stage type,
			uint32_t regid, uint32_t offset, uint32_t sizedwords,
			const uint32_t *dwords, struct pipe_resource *prsc);
	/* emit bo addresses as constant: */
	void (*emit_const_bo)(struct fd_ringbuffer *ring, gl_shader_stage type, boolean write,
			uint32_t regid, uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);

	/* indirect-branch emit: */
	void (*emit_ib)(struct fd_ringbuffer *ring, struct fd_ringbuffer *target);

	/* query: */
	struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type);
	void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
	void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
			struct fd_ringbuffer *ring);
	void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);

	/* blitter: */
	bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);

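	/* e.g. with the reworked blit API, a generic blit path would try the
	 * hw-specific hook first and fall back to the blitter when it
	 * declines (sketch; fallback name assumed):
	 *
	 *    if (!(ctx->blit && ctx->blit(ctx, info)))
	 *       fd_blitter_blit(ctx, info);
	 */
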
	/* simple gpu "memcpy": */
	void (*mem_to_mem)(struct fd_ringbuffer *ring, struct pipe_resource *dst,
			unsigned dst_off, struct pipe_resource *src, unsigned src_off,
			unsigned sizedwords);

	/*
	 * Common pre-cooked VBO state (used for a3xx and later):
	 */

	/* for clear/gmem->mem vertices, and mem->gmem */
	struct pipe_resource *solid_vbuf;

	/* for mem->gmem tex coords: */
	struct pipe_resource *blit_texcoord_vbuf;

	/* vertex state for solid_vbuf:
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state solid_vbuf_state;

	/* vertex state for blit_prog:
	 *    - blit_texcoord_vbuf / 8 / R32G32_FLOAT
	 *    - solid_vbuf / 12 / R32G32B32_FLOAT
	 */
	struct fd_vertex_state blit_vbuf_state;
};

static inline struct fd_context *
fd_context(struct pipe_context *pctx)
{
	return (struct fd_context *)pctx;
}

static inline void
fd_context_assert_locked(struct fd_context *ctx)
{
	pipe_mutex_assert_locked(ctx->screen->lock);
}

static inline void
fd_context_lock(struct fd_context *ctx)
{
	mtx_lock(&ctx->screen->lock);
}

static inline void
fd_context_unlock(struct fd_context *ctx)
{
	mtx_unlock(&ctx->screen->lock);
}

/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
{
	ctx->dirty = ~0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++)
		ctx->dirty_shader[i] = ~0;
}

static inline void
fd_context_all_clean(struct fd_context *ctx)
{
	ctx->dirty = 0;
	for (unsigned i = 0; i < PIPE_SHADER_TYPES; i++) {
		/* don't mark compute state as clean, since it is not emitted
		 * during a normal draw call.  For the places that call
		 * _all_dirty(), it is safe to mark compute state dirty as
		 * well, but the inverse is not true.
		 */
		if (i == PIPE_SHADER_COMPUTE)
			continue;
		ctx->dirty_shader[i] = 0;
	}
}

static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
{
	if (ctx->rasterizer && ctx->rasterizer->scissor)
		return &ctx->scissor;
	return &ctx->disabled_scissor;
}

static inline bool
fd_supported_prim(struct fd_context *ctx, unsigned prim)
{
	return (1 << prim) & ctx->primtype_mask;
}

static inline struct fd_batch *
fd_context_batch(struct fd_context *ctx)
{
	if (unlikely(!ctx->batch)) {
		struct fd_batch *batch =
			fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
		util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
		ctx->batch = batch;
		fd_context_all_dirty(ctx);
	}
	return ctx->batch;
}

static inline void
fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
{
	struct fd_context *ctx = batch->ctx;

	/* special case: internal blits (like mipmap level generation)
	 * go through normal draw path (via util_blitter_blit()).. but
	 * we need to ignore the FD_STAGE_DRAW which will be set, so we
	 * don't enable queries which should be paused during internal
	 * blits:
	 */
	if ((batch->stage == FD_STAGE_BLIT) &&
			(stage != FD_STAGE_NULL))
		return;

	if (ctx->query_set_stage)
		ctx->query_set_stage(batch, stage);

	batch->stage = stage;
}

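/* e.g. a sketch of usage: draw/clear/blit paths announce the stage they
 * are entering so queries can be paused/resumed appropriately:
 *
 *    fd_batch_set_stage(batch, FD_STAGE_DRAW);
 */
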
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);

struct pipe_context * fd_context_init(struct fd_context *ctx,
		struct pipe_screen *pscreen, const uint8_t *primtypes,
		void *priv, unsigned flags);

void fd_context_destroy(struct pipe_context *pctx);

#endif /* FREEDRENO_CONTEXT_H_ */