freedreno/a6xx: Move resolve blits to an IB
[mesa.git] src/gallium/drivers/freedreno/freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

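/* per-batch init, also run when recycling a batch via batch_reset(), so
 * everything allocated here must be torn down again in batch_fini():
 */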
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 *
	 * XXX I think we can just require new enough kernel for this?
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->submit = fd_submit_new(ctx->pipe);
	if (batch->nondraw) {
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
	} else {
		batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_GROWABLE);

		if (ctx->screen->gpu_id < 600) {
			batch->binning = fd_submit_new_ringbuffer(batch->submit,
					size, FD_RINGBUFFER_GROWABLE);
		}
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	util_dynarray_init(&batch->gmem_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

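/* allocate and initialize a new batch, returned with a single reference
 * held by the caller (dropped via fd_batch_reference()):
 */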
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

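/* undo batch_init(), releasing the ringbuffers, fences, and dynarrays
 * before the batch is destroyed or recycled:
 */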
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(NULL, &batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		if (batch->binning)
			fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}

	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	if (batch->tile_setup) {
		fd_ringbuffer_del(batch->tile_setup);
		batch->tile_setup = NULL;
	}

	if (batch->tile_fini) {
		fd_ringbuffer_del(batch->tile_fini);
		batch->tile_fini = NULL;
	}

	fd_submit_del(batch->submit);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	util_dynarray_fini(&batch->gmem_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

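/* flush (if requested) and unref each batch recorded in dependents_mask,
 * then clear the mask:
 */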
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

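/* remove this batch from the batch_mask of each tracked resource, and
 * drop the write_batch reference if we were the writer.  Caller must
 * hold the screen lock:
 */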
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

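/* called when the last reference to a batch is dropped (see
 * fd_batch_reference()), with the context/screen lock held:
 */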
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

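/* wait for a previously queued asynchronous flush of this batch to
 * complete (no-op if reordering is disabled):
 */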
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	DBG("%p", batch);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

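/* do the actual flush: either hand the batch off to the flush_queue
 * (reorder case) or render the tiles synchronously:
 */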
static void
batch_flush(struct fd_batch *batch, bool force)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, false);
	mtx_unlock(&batch->ctx->screen->lock);
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
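/* e.g. a caller that needs a fence it can wait on, even if nothing has
 * been rendered yet, would want both:
 *
 *    fd_batch_flush(batch, true, true);
 */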
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
	struct fd_batch *tmp = NULL;
	bool newbatch = false;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	if (batch == batch->ctx->batch) {
		batch->ctx->batch = NULL;
		newbatch = true;
	}

	batch_flush(tmp, force);

	if (newbatch) {
		struct fd_context *ctx = batch->ctx;
		struct fd_batch *new_batch;

		if (ctx->screen->reorder) {
			/* defer allocating new batch until one is needed for rendering
			 * to avoid unused batches for apps that create many contexts
			 */
			new_batch = NULL;
		} else {
			new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
		}

		fd_batch_reference(&batch, NULL);
		ctx->batch = new_batch;
		fd_context_all_dirty(ctx);
	}

	if (sync)
		fd_batch_sync(tmp);

	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse to catch indirect dependencies: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!batch_depends_on(dep, batch));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

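/* flush the batch that is currently writing 'rsc'.  The screen lock is
 * held on entry, but must be dropped around the flush since flushing
 * acquires it again internally:
 */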
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference(&b, rsc->write_batch);

	mtx_unlock(&b->ctx->screen->lock);
	fd_batch_flush(b, true, false);
	mtx_lock(&b->ctx->screen->lock);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

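/* track that 'rsc' is read or written by this batch, adding cross-batch
 * dependencies and/or flushing other writers as needed to preserve
 * ordering.  Caller must hold the screen lock:
 */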
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	if (write)
		rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask & ~(1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;

			if (rsc->write_batch && rsc->write_batch != batch)
				flush_write_batch(rsc);

			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				fd_batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		/* If reading a resource pending a write, go ahead and flush the
		 * writer.  This avoids situations where we end up having to
		 * flush the current batch in _resource_used()
		 */
		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);
	}

	if (rsc->batch_mask & (1 << batch->idx)) {
		debug_assert(_mesa_set_search(batch->resources, rsc));
		return;
	}

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

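/* flush the batch before a fixed-size (pre-FD_VERSION_UNLIMITED_CMDS)
 * ringbuffer runs out of space; also implements the FD_DBG_FLUSH debug
 * flag, which forces a flush on every check:
 */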
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch, true, false);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}