freedreno: fix potential hang when destroying batch
[mesa.git] src/gallium/drivers/freedreno/freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

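/* Initialize (or re-initialize, when a batch is recycled via batch_reset())
 * the batch's ringbuffers, fence, and per-batch bookkeeping:
 */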
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->draw = fd_ringbuffer_new(ctx->pipe, size);
	if (!batch->nondraw) {
		batch->binning = fd_ringbuffer_new(ctx->pipe, size);
		batch->gmem = fd_ringbuffer_new(ctx->pipe, size);

		fd_ringbuffer_set_parent(batch->gmem, NULL);
		fd_ringbuffer_set_parent(batch->draw, batch->gmem);
		fd_ringbuffer_set_parent(batch->binning, batch->gmem);
	} else {
		fd_ringbuffer_set_parent(batch->draw, NULL);
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	util_dynarray_init(&batch->gmem_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

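/* Allocate a new batch.  A "nondraw" batch gets only a draw ring (no
 * binning/gmem rings), for work that bypasses the tiled rendering path:
 */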
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

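/* Tear down the state created by batch_init().  Note that the batch may
 * never have been flushed, in which case its fence still needs to be
 * populated before it is dropped:
 */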
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(NULL, &batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}
	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	util_dynarray_fini(&batch->gmem_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

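/* Drop this batch's references to the batches it depends on, optionally
 * flushing them first:
 */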
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

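/* Remove all resources from the batch's resource set, clearing each
 * resource's back-pointers to this batch.  Caller must hold screen->lock:
 */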
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	struct set_entry *entry;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

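/* Discard any unflushed rendering and return the batch to a freshly
 * initialized state:
 */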
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

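/* Public entry point, only resets the batch if it actually has unflushed
 * rendering:
 */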
void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

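/* Called when the last reference to the batch is dropped (see
 * fd_batch_reference()).  Caller holds the context/screen lock:
 */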
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

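	/* NOTE: dropping the last ref to a dependent batch can re-take the
	 * lock we are holding (via fd_batch_reference()), so release it
	 * across the dependency cleanup; this is the potential hang the
	 * commit subject refers to:
	 */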
	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

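/* Block until the batch's queued flush job has completed.  No-op unless
 * batch reordering (and with it the flush_queue) is enabled:
 */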
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

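/* util_queue execute callback, runs on the flush_queue thread: */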
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	DBG("%p", batch);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

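/* util_queue cleanup callback, drops the queue's reference to the batch: */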
static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

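/* Do the actual flush: either render the tiles directly, or (with
 * reordering enabled) queue the work to the flush_queue thread:
 */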
static void
batch_flush(struct fd_batch *batch, bool force)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, false);
	mtx_unlock(&batch->ctx->screen->lock);
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
	struct fd_batch *tmp = NULL;
	bool newbatch = false;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	if (batch == batch->ctx->batch) {
		batch->ctx->batch = NULL;
		newbatch = true;
	}

	batch_flush(tmp, force);

	if (newbatch) {
		struct fd_context *ctx = batch->ctx;
		struct fd_batch *new_batch;

		if (ctx->screen->reorder) {
			/* defer allocating new batch until one is needed for rendering
			 * to avoid unused batches for apps that create many contexts
			 */
			new_batch = NULL;
		} else {
			new_batch = fd_batch_create(ctx, false);
			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
		}

		fd_batch_reference(&batch, NULL);
		ctx->batch = new_batch;
	}

	if (sync)
		fd_batch_sync(tmp);

	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse to catch indirect dependencies: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

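/* Record that 'batch' depends on 'dep', so 'dep' gets flushed before
 * 'batch'.  Caller must hold screen->lock:
 */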
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!batch_depends_on(dep, batch));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

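/* Flush the batch currently writing 'rsc', temporarily dropping
 * screen->lock since the flush itself needs to take it:
 */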
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference(&b, rsc->write_batch);

	mtx_unlock(&b->ctx->screen->lock);
	fd_batch_flush(b, true, false);
	mtx_lock(&b->ctx->screen->lock);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	if (write)
		rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask & ~(1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;

			if (rsc->write_batch && rsc->write_batch != batch)
				flush_write_batch(rsc);

			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				fd_batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		/* If reading a resource pending a write, go ahead and flush the
		 * writer.  This avoids situations where we end up having to
		 * flush the current batch in _resource_used()
		 */
		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

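/* Flush the batch if the draw ring is getting close to full (only relevant
 * on kernels without FD_VERSION_UNLIMITED_CMDS, where the ring cannot
 * grow), or unconditionally when FD_DBG_FLUSH is set:
 */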
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch, true, false);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}