freedreno: track batch/blit types
[mesa.git] / src / gallium / drivers / freedreno / freedreno_batch.h
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_BATCH_H_
#define FREEDRENO_BATCH_H_

#include "util/u_inlines.h"
#include "util/list.h"

#include "freedreno_util.h"

struct fd_context;
struct fd_resource;
enum fd_resource_status;

/* Bitmask of the rendering stages in which a particular query is
 * active.  Queries will be automatically started/stopped (generating
 * additional fd_hw_sample_period's) on entrance/exit from stages that
 * are applicable to the query.
 *
 * NOTE: set the stage to FD_STAGE_NULL at the end of an IB to ensure no
 * query is still active.  Things aren't going to work out the way you
 * want if a query is active across IBs (or between the tile IB and the
 * draw IB).
 */
enum fd_render_stage {
	FD_STAGE_NULL     = 0x01,
	FD_STAGE_DRAW     = 0x02,
	FD_STAGE_CLEAR    = 0x04,
	/* TODO: before queries which include MEM2GMEM or GMEM2MEM will
	 * work, we will need to call fd_hw_query_prepare() from somewhere
	 * appropriate so that queries in the tiling IB get backed with
	 * memory to write results to.
	 */
	FD_STAGE_MEM2GMEM = 0x08,
	FD_STAGE_GMEM2MEM = 0x10,
	/* used for driver internal draws (i.e. util_blitter_blit()): */
	FD_STAGE_BLIT     = 0x20,
	FD_STAGE_ALL      = 0xff,
};
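
/* Illustrative sketch (not part of the actual driver code): one way a
 * backend could bracket a driver-internal blit with stage transitions so
 * that queries not applicable to FD_STAGE_BLIT are paused and then resumed.
 * The fd_set_stage() helper name below is a hypothetical placeholder, not
 * an API declared in this header:
 *
 *    static void
 *    emit_internal_blit(struct fd_batch *batch)
 *    {
 *       enum fd_render_stage saved = batch->stage;
 *
 *       fd_set_stage(batch, FD_STAGE_BLIT);   // stop queries not active in BLIT
 *       // ... emit blit cmds into batch->draw ...
 *       fd_set_stage(batch, saved);           // restart queries for previous stage
 *    }
 */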

#define MAX_HW_SAMPLE_PROVIDERS 4
struct fd_hw_sample_provider;
struct fd_hw_sample;

/* A batch tracks everything about a cmdstream batch/submit, including the
 * ringbuffers used for binning, draw, and gmem cmds, list of associated
 * fd_resource-s, etc.
 */
struct fd_batch {
	struct pipe_reference reference;
	unsigned seqno;
	unsigned idx;

	struct fd_context *ctx;

	/* Do we need to mem2gmem before rendering?  We don't if, for example,
	 * there was a glClear() that invalidated the entire previous buffer
	 * contents.  Keep track of which buffer(s) are cleared, or need
	 * restore.  Masks of PIPE_CLEAR_*
	 *
	 * The 'cleared' bits will be set for buffers which are *entirely*
	 * cleared, and the 'partial_cleared' bits will be set if you must
	 * check cleared_scissor.
	 */
	enum {
		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
		FD_BUFFER_COLOR   = PIPE_CLEAR_COLOR,
		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
	} cleared, partial_cleared, restore, resolve;

	bool needs_flush : 1;
	bool blit : 1;
	bool back_blit : 1;      /* only blit so far is resource shadowing back-blit */

	/* To decide whether to render to system memory, keep track of the
	 * number of draws, and whether any of them require multisample,
	 * depth_test (or depth write), stencil_test, blending, and
	 * color_logic_op (since those functions are disabled when
	 * bypassing GMEM).
	 */
	enum {
		FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
		FD_GMEM_DEPTH_ENABLED        = 0x02,
		FD_GMEM_STENCIL_ENABLED      = 0x04,

		FD_GMEM_MSAA_ENABLED         = 0x08,
		FD_GMEM_BLEND_ENABLED        = 0x10,
		FD_GMEM_LOGICOP_ENABLED      = 0x20,
	} gmem_reason;
	unsigned num_draws;   /* number of draws in current batch */

	/* Track the maximal bounds of the scissor of all the draws within a
	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
	 * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
	 */
	struct pipe_scissor_state max_scissor;

	/* Track the cleared scissor for color/depth/stencil, so we know
	 * which, if any, tiles need to be restored (mem2gmem).  Only valid
	 * if the corresponding bit in ctx->cleared is set.
	 */
	struct {
		struct pipe_scissor_state color, depth, stencil;
	} cleared_scissor;

	/* Keep track of DRAW initiators that need to be patched up depending
	 * on whether or not we are using binning:
	 */
	struct util_dynarray draw_patches;

	/* Keep track of writes to RB_RENDER_CONTROL which need to be patched
	 * once we know whether or not to use GMEM, and the GMEM tile pitch.
	 *
	 * (only for a3xx.. but having gen-specific subclasses of fd_batch
	 * seemed overkill for now)
	 */
	struct util_dynarray rbrc_patches;

	struct pipe_framebuffer_state framebuffer;

	/** draw pass cmdstream: */
	struct fd_ringbuffer *draw;
	/** binning pass cmdstream: */
	struct fd_ringbuffer *binning;
	/** tiling/gmem (IB0) cmdstream: */
	struct fd_ringbuffer *gmem;

	/**
	 * hw query related state:
	 */
	/*@{*/
	/* next sample offset.. incremented for each sample in the batch/
	 * submit, reset to zero on next submit.
	 */
	uint32_t next_sample_offset;

	/* cached samples (in case multiple queries need to reference
	 * the same sample snapshot)
	 */
	struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];

	/* which sample providers were active in the current batch: */
	uint32_t active_providers;

	/* tracking for current stage, to know when to start/stop
	 * any active queries:
	 */
	enum fd_render_stage stage;

	/* list of samples in current batch: */
	struct util_dynarray samples;

	/* current query result bo and tile stride: */
	struct pipe_resource *query_buf;
	uint32_t query_tile_stride;
	/*@}*/

	/* Set of resources used by the currently-unsubmitted batch (read or
	 * write)..  does not hold a reference to the resource.
	 */
	struct set *resources;

	/** key in batch-cache (if not null): */
	const void *key;
	uint32_t hash;

	/** set of dependent batches.. holds refs to dependent batches: */
	uint32_t dependents_mask;
};
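
/* Illustrative sketch (not the driver's actual flush logic): roughly how
 * the per-batch bookkeeping above could feed the GMEM-vs-sysmem decision
 * at flush time.  The helper name and 'few_draws_threshold' are
 * hypothetical; the real heuristic lives in the gmem rendering code:
 *
 *    static bool
 *    fd_batch_use_sysmem_bypass(struct fd_batch *batch)
 *    {
 *       // per the comments above: bypassing GMEM is only an option when
 *       // none of the draws needed depth/stencil/blend/msaa/logicop
 *       // (i.e. no gmem_reason bits are set), and with only a handful of
 *       // draws the per-tile overhead may not pay off anyway:
 *       return (batch->gmem_reason == 0) &&
 *              (batch->num_draws <= few_draws_threshold);
 *    }
 */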

struct fd_batch * fd_batch_create(struct fd_context *ctx);

void fd_batch_reset(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
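
/* Illustrative usage sketch (hypothetical call site, not part of this
 * header): how the declarations above fit together.  'ctx', 'tex' and
 * 'cbuf' are assumed to exist in the caller:
 *
 *    struct fd_batch *batch = fd_batch_create(ctx);
 *
 *    // per draw: record that this draw reads 'tex' and writes 'cbuf':
 *    fd_batch_resource_used(batch, tex, false);    // read
 *    fd_batch_resource_used(batch, cbuf, true);    // write
 *
 *    // flush to the kernel early if the ringbuffers are getting too full:
 *    fd_batch_check_size(batch);
 *
 *    // ... later, at context flush time:
 *    fd_batch_flush(batch);
 */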

/* not called directly: */
void __fd_batch_describe(char* buf, const struct fd_batch *batch);
void __fd_batch_destroy(struct fd_batch *batch);

static inline void
fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;
	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe))
		__fd_batch_destroy(old_batch);
	*ptr = batch;
}
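
/* For example, following the usual gallium pipe_reference idiom, a caller
 * can repoint a batch pointer while keeping refcounts balanced (the old
 * batch, if any, is unreferenced and the new one referenced):
 *
 *    fd_batch_reference(&ctx->batch, new_batch);
 *
 * or drop a reference entirely:
 *
 *    fd_batch_reference(&batch, NULL);
 *
 * ('ctx->batch' and 'new_batch' are just illustrative names here.)
 */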

#endif /* FREEDRENO_BATCH_H_ */