freedreno: some locking
mesa.git / src/gallium/drivers/freedreno/freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

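/* (Re)initialize per-batch state.  This is called both from
 * fd_batch_create() and from batch_reset() when recycling a batch, so it
 * should only set up state that batch_fini() tears down.
 */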
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) {
		size = 0x100000;
	}

	batch->draw = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->gmem = fd_ringbuffer_new(ctx->screen->pipe, size);

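	/* The gmem ring is the top-level cmdstream that eventually gets
	 * submitted to the kernel; draw and binning cmds are built up per
	 * draw and replayed from gmem per-tile.  Parenting draw and binning
	 * to gmem should (assuming the usual libdrm_freedreno semantics)
	 * let the parent's submit track the children's buffers.
	 */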
	fd_ringbuffer_set_parent(batch->gmem, NULL);
	fd_ringbuffer_set_parent(batch->draw, batch->gmem);
	fd_ringbuffer_set_parent(batch->binning, batch->gmem);

	batch->cleared = batch->partial_cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	/* reset maximal bounds: */
	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

	util_dynarray_init(&batch->draw_patches);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples);
}

struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

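/* Inverse of batch_init(): tear down the per-batch state so the batch can
 * either be recycled (batch_reset()) or freed (__fd_batch_destroy()).
 */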
static void
batch_fini(struct fd_batch *batch)
{
	pipe_resource_reference(&batch->query_buf, NULL);

	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->binning);
	fd_ringbuffer_del(batch->gmem);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

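/* Drop this batch's claim on all of its resources.  Caller must hold
 * screen->lock; the _locked suffix follows that convention throughout
 * this file.
 */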
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	struct set_entry *entry;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	pipe_mutex_lock(batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	pipe_mutex_unlock(batch->ctx->screen->lock);
}

static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

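/* Invoked via the pipe_reference mechanism once the last reference to the
 * batch is dropped; not meant to be called directly, hence the __ prefix.
 */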
void
__fd_batch_destroy(struct fd_batch *batch)
{
	DBG("%p", batch);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);

	pipe_mutex_lock(batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, true);
	pipe_mutex_unlock(batch->ctx->screen->lock);

	batch_fini(batch);

	batch_reset_resources(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	free(batch);
}

void
__fd_batch_describe(char *buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

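/* Wait for any asynchronous flush of this batch to complete.  This is a
 * no-op when reordering is disabled, since then flushes happen
 * synchronously on the context thread.
 */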
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_job_wait(&batch->flush_fence);
}

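/* The following two functions run on the flush-queue thread when
 * reordering is enabled: batch_flush_func() does the actual rendering,
 * and batch_cleanup_func() drops the reference that batch_flush() takes
 * before queueing the job.
 */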
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
	batch->ctx->last_fence = fd_ringbuffer_timestamp(batch->gmem);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (!batch->needs_flush)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_NULL);

	batch->ctx->dirty = ~0;
	batch_flush_reset_dependencies(batch, true);

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);
		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
		batch->ctx->last_fence = fd_ringbuffer_timestamp(batch->gmem);
	}

	debug_assert(batch->reference.count > 0);

	if (batch == batch->ctx->batch) {
		batch_reset(batch);
	} else {
		pipe_mutex_lock(batch->ctx->screen->lock);
		fd_bc_invalidate_batch(batch, false);
		pipe_mutex_unlock(batch->ctx->screen->lock);
	}
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	struct fd_batch *tmp = NULL;
	fd_batch_reference(&tmp, batch);
	batch_flush(tmp);
	if (sync)
		fd_batch_sync(tmp);
	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into direct dependencies to catch indirect ones; the
	 * previous recursion checked (batch, dep), which is trivially true
	 * for every direct dependency and never actually tested 'other':
	 */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

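/* Example of the loop batch_add_dep() guards against: if batch A reads a
 * resource last written by batch B, then A depends on B.  If B then needs
 * to depend on A (say, B writes a resource that A reads), adding the edge
 * would create a cycle, so A is flushed instead.
 */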
static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* if the new dependency already depends on us, we need to flush
	 * to avoid a loop in the dependency graph.
	 */
	if (batch_depends_on(dep, batch)) {
		DBG("%p: flush forced on %p!", batch, dep);
		pipe_mutex_unlock(batch->ctx->screen->lock);
		fd_batch_flush(dep, false);
		pipe_mutex_lock(batch->ctx->screen->lock);
	} else {
		struct fd_batch *other = NULL;
		fd_batch_reference_locked(&other, dep);
		batch->dependents_mask |= (1 << dep->idx);
		DBG("%p: added dependency on %p", batch, dep);
	}
}

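/* Track a read or write of 'rsc' by this batch.  Caller must hold
 * screen->lock; note that batch_add_dep() can temporarily drop the lock
 * if it has to flush to break a dependency loop.
 */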
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	/* Note: we invalidate the current write batch below, so that any
	 * further writes to rsc go to a new batch rather than creating a
	 * write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask != (1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;
			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		if (rsc->write_batch) {
			batch_add_dep(batch, rsc->write_batch);
			fd_bc_invalidate_batch(rsc->write_batch, false);
		}
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

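/* Flush before the draw ring runs out of space, for kernels that can't
 * grow the ringbuffer.  Assuming the usual libdrm_freedreno layout
 * (ring->size in bytes, cur/start as dword pointers), size/4 is the ring
 * size in dwords and 0x1000 dwords is the headroom margin.
 */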
void
fd_batch_check_size(struct fd_batch *batch)
{
	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_batch_flush(batch, true);
}