/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_BATCH_H_
#define FREEDRENO_BATCH_H_

#include "util/u_inlines.h"
#include "util/u_queue.h"
#include "util/list.h"

#include "freedreno_util.h"

struct fd_context;
struct fd_resource;
enum fd_resource_status;

/* Bitmask of rendering stages during which a particular query is
 * active.  Queries will be automatically started/stopped (generating
 * additional fd_hw_sample_period's) on entrance/exit from stages that
 * are applicable to the query.
 *
 * NOTE: set the stage to NULL at end of IB to ensure no query is still
 * active.  Things aren't going to work out the way you want if a query
 * is active across IB's (or between tile IB and draw IB)
 */
enum fd_render_stage {
	FD_STAGE_NULL     = 0x01,
	FD_STAGE_DRAW     = 0x02,
	FD_STAGE_CLEAR    = 0x04,
	/* used for driver internal draws (ie. util_blitter_blit()): */
	FD_STAGE_BLIT     = 0x08,
	FD_STAGE_ALL      = 0xff,
};
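
/* A hedged sketch (not code from this driver) of how a stage transition
 * could drive query start/stop: if each hw sample provider advertises the
 * stages it cares about as a fd_render_stage bitmask, activity checks
 * reduce to mask tests.  The names 'provider->active', 'hq', and the
 * resume/pause helpers below are assumptions for illustration only:
 *
 *    bool was_active = (hq->provider->active & old_stage) != 0;
 *    bool is_active  = (hq->provider->active & new_stage) != 0;
 *    if (!was_active && is_active)
 *       resume_query(batch, hq);     // opens a new sample period
 *    else if (was_active && !is_active)
 *       pause_query(batch, hq);      // closes the current period
 */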

#define MAX_HW_SAMPLE_PROVIDERS 5
struct fd_hw_sample_provider;
struct fd_hw_sample;

/* A batch tracks everything about a cmdstream batch/submit, including the
 * ringbuffers used for binning, draw, and gmem cmds, list of associated
 * fd_resource-s, etc.
 */
struct fd_batch {
	struct pipe_reference reference;
	unsigned seqno;
	unsigned idx;        /* index into cache->batches[] */

	int in_fence_fd;
	bool needs_out_fence_fd;
	struct pipe_fence_handle *fence;

	struct fd_context *ctx;

	struct util_queue_fence flush_fence;

	/* Do we need to mem2gmem before rendering?  We don't if, for example,
	 * there was a glClear() that invalidated the entire previous buffer
	 * contents.  Keep track of which buffer(s) are cleared, or need
	 * restore.  Masks of PIPE_CLEAR_*
	 *
	 * The 'cleared' bits will be set for buffers which are *entirely*
	 * cleared, and 'partial_cleared' bits will be set if you must
	 * check cleared_scissor.
	 *
	 * The 'invalidated' bits are set for cleared buffers, and buffers
	 * where the contents are undefined, ie. what we don't need to restore
	 * to gmem.
	 */
	enum {
		/* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
		FD_BUFFER_COLOR   = PIPE_CLEAR_COLOR,
		FD_BUFFER_DEPTH   = PIPE_CLEAR_DEPTH,
		FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
		FD_BUFFER_ALL     = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
	} invalidated, cleared, restore, resolve;
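
	/* A minimal sketch of how these masks are consumed at tile render
	 * time (the helper names here are hypothetical, not part of this
	 * header): buffers marked in 'restore' get pulled into gmem before
	 * drawing, and buffers marked in 'resolve' get written back out:
	 *
	 *    if (batch->restore & FD_BUFFER_COLOR)
	 *       emit_mem2gmem_color(batch, tile);   // hypothetical helper
	 *    if (batch->resolve & FD_BUFFER_DEPTH)
	 *       emit_gmem2mem_depth(batch, tile);   // hypothetical helper
	 */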

	/* is this a non-draw batch (ie. compute/blit which has no pfb state)? */
	bool nondraw : 1;
	bool needs_flush : 1;
	bool flushed : 1;
	bool blit : 1;
	bool back_blit : 1;    /* only blit so far is resource shadowing back-blit */

	/* Keep track if WAIT_FOR_IDLE is needed for registers we need
	 * to update via RMW:
	 */
	bool needs_wfi : 1;

	/* To decide whether to render to system memory, keep track of the
	 * number of draws, and whether any of them require multisample,
	 * depth_test (or depth write), stencil_test, blending, and
	 * color_logic_op (since those functions are disabled when
	 * bypassing GMEM).
	 */
	enum {
		FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
		FD_GMEM_DEPTH_ENABLED        = 0x02,
		FD_GMEM_STENCIL_ENABLED      = 0x04,

		FD_GMEM_BLEND_ENABLED        = 0x10,
		FD_GMEM_LOGICOP_ENABLED      = 0x20,
	} gmem_reason;
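
	/* Hedged sketch of the decision this state feeds (the real heuristic
	 * lives in the gmem code and may differ; the threshold below is an
	 * assumption): any gmem_reason bit, or a clear, forces the gmem path,
	 * while small simple batches can bypass to system memory:
	 *
	 *    bool use_gmem = batch->cleared || batch->gmem_reason ||
	 *          (batch->num_draws > SYSMEM_DRAW_THRESHOLD);  // hypothetical
	 */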
	unsigned num_draws;    /* number of draws in current batch */

	/* Track the maximal bounds of the scissor of all the draws within a
	 * batch.  Used at the tile rendering step (fd_gmem_render_tiles(),
	 * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
	 */
	struct pipe_scissor_state max_scissor;
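
	/* Sketch of the intended per-draw update (assumed from the comment
	 * above; MIN2/MAX2 are the usual mesa util macros): grow max_scissor
	 * to the union of all draw scissors so the tile pass can skip
	 * untouched regions:
	 *
	 *    batch->max_scissor.minx = MIN2(batch->max_scissor.minx, minx);
	 *    batch->max_scissor.miny = MIN2(batch->max_scissor.miny, miny);
	 *    batch->max_scissor.maxx = MAX2(batch->max_scissor.maxx, maxx);
	 *    batch->max_scissor.maxy = MAX2(batch->max_scissor.maxy, maxy);
	 */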

	/* Keep track of DRAW initiators that need to be patched up depending
	 * on whether we are using binning or not:
	 */
	struct util_dynarray draw_patches;
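
	/* Sketch of the patch mechanism (assuming the fd_cs_patch struct from
	 * freedreno_util.h): record where a placeholder dword will live, emit
	 * the placeholder, and rewrite it at flush time once the binning
	 * decision is known:
	 *
	 *    struct fd_cs_patch patch = { .cs = ring->cur, .val = initiator };
	 *    util_dynarray_append(&batch->draw_patches, struct fd_cs_patch, patch);
	 *    OUT_RING(ring, 0);   // placeholder, patched before submit
	 */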

	/* Keep track of blitter GMEM offsets that need to be patched up once we
	 * know the gmem layout:
	 */
	struct util_dynarray gmem_patches;

	/* Keep track of writes to RB_RENDER_CONTROL which need to be patched
	 * once we know whether or not to use GMEM, and GMEM tile pitch.
	 *
	 * (only for a3xx.. but having gen specific subclasses of fd_batch
	 * seemed overkill for now)
	 */
	struct util_dynarray rbrc_patches;

	struct pipe_framebuffer_state framebuffer;

	struct fd_submit *submit;

	/** draw pass cmdstream: */
	struct fd_ringbuffer *draw;
	/** binning pass cmdstream: */
	struct fd_ringbuffer *binning;
	/** tiling/gmem (IB0) cmdstream: */
	struct fd_ringbuffer *gmem;

	// TODO maybe more generically split out clear and clear_binning rings?
	struct fd_ringbuffer *lrz_clear;
	struct fd_ringbuffer *tile_setup;
	struct fd_ringbuffer *tile_fini;

	/**
	 * hw query related state:
	 */
	/*@{*/
	/* next sample offset.. incremented for each sample in the batch/
	 * submit, reset to zero on next submit.
	 */
	uint32_t next_sample_offset;

	/* cached samples (in case multiple queries need to reference
	 * the same sample snapshot)
	 */
	struct fd_hw_sample *sample_cache[MAX_HW_SAMPLE_PROVIDERS];

	/* which sample providers were active in the current batch: */
	uint32_t active_providers;

	/* tracking for current stage, to know when to start/stop
	 * any active queries:
	 */
	enum fd_render_stage stage;

	/* list of samples in current batch: */
	struct util_dynarray samples;

	/* current query result bo and tile stride: */
	struct pipe_resource *query_buf;
	uint32_t query_tile_stride;
	/*@}*/
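
	/* Hedged sketch of how the sample cache is meant to work (the
	 * 'get_sample' vfunc name comes from the hw sample provider
	 * interface, but treat this as illustration): queries of the same
	 * provider that snapshot the same point in the cmdstream share one
	 * fd_hw_sample instead of allocating a new one:
	 *
	 *    struct fd_hw_sample *samp = batch->sample_cache[idx];
	 *    if (!samp) {
	 *       samp = provider->get_sample(batch, batch->draw);
	 *       batch->sample_cache[idx] = samp;
	 *       batch->active_providers |= (1 << idx);
	 *    }
	 */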

	/* Set of resources used by currently-unsubmitted batch (read or
	 * write).. does not hold a reference to the resource.
	 */
	struct set *resources;

	/** key in batch-cache (if not null): */
	const void *key;
	uint32_t hash;

	/** set of dependent batches.. holds refs to dependent batches: */
	uint32_t dependents_mask;
};

struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);

void fd_batch_reset(struct fd_batch *batch);
void fd_batch_sync(struct fd_batch *batch);
void fd_batch_flush(struct fd_batch *batch, bool sync, bool force);
void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
void fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write);
void fd_batch_check_size(struct fd_batch *batch);
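
/* A sketched lifecycle using the functions above (simplified; real callers
 * also deal with the batch-cache and screen lock, and 'prsc' here is an
 * assumed pipe_resource):
 *
 *    struct fd_batch *batch = fd_batch_create(ctx, false);
 *    fd_batch_resource_used(batch, fd_resource(prsc), true);  // written
 *    ...emit draws into batch->draw...
 *    fd_batch_flush(batch, true, false);    // synchronous flush
 *    fd_batch_reference(&batch, NULL);      // drop our reference
 */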

/* not called directly: */
void __fd_batch_describe(char *buf, const struct fd_batch *batch);
void __fd_batch_destroy(struct fd_batch *batch);

/*
 * NOTE the rule is, you need to hold the screen->lock when destroying
 * a batch..  so either use fd_batch_reference() (which grabs the lock
 * for you) if you don't hold the lock, or fd_batch_reference_locked()
 * if you do hold the lock.
 *
 * WARNING the _locked() version can briefly drop the lock.  Without
 * recursive mutexes, I'm not sure there is much else we can do (since
 * __fd_batch_destroy() needs to unref resources)
 */

/* fwd-decl prototypes to untangle header dependency :-/ */
static inline void fd_context_assert_locked(struct fd_context *ctx);
static inline void fd_context_lock(struct fd_context *ctx);
static inline void fd_context_unlock(struct fd_context *ctx);

static inline void
fd_batch_reference_locked(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;

	/* only need lock if a reference is dropped: */
	if (old_batch)
		fd_context_assert_locked(old_batch->ctx);

	if (pipe_reference_described(&(*ptr)->reference, &batch->reference,
			(debug_reference_descriptor)__fd_batch_describe))
		__fd_batch_destroy(old_batch);

	*ptr = batch;
}

static inline void
fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
{
	struct fd_batch *old_batch = *ptr;
	struct fd_context *ctx = old_batch ? old_batch->ctx : NULL;

	if (ctx)
		fd_context_lock(ctx);

	fd_batch_reference_locked(ptr, batch);

	if (ctx)
		fd_context_unlock(ctx);
}
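
/* e.g., to drop a reference without already holding the screen lock:
 *
 *    fd_batch_reference(&ctx->batch, NULL);
 *
 * use the _locked() variant when the caller already holds it, keeping in
 * mind the WARNING above about the lock being briefly dropped.
 */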

#include "freedreno_context.h"

static inline void
fd_reset_wfi(struct fd_batch *batch)
{
	batch->needs_wfi = true;
}

void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring);

/* emit a CP_EVENT_WRITE:
 */
static inline void
fd_event_write(struct fd_batch *batch, struct fd_ringbuffer *ring,
		enum vgt_event_type evt)
{
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, evt);
	fd_reset_wfi(batch);
}
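
/* e.g., a cache flush event between draws (event values come from the
 * generated adreno register headers; CACHE_FLUSH_TS shown as a typical
 * example):
 *
 *    fd_event_write(batch, ring, CACHE_FLUSH_TS);
 */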

#endif /* FREEDRENO_BATCH_H_ */