panfrost: Don't double-create scratchpad
[mesa.git] / src / gallium / drivers / panfrost / pan_job.c
/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <assert.h>

#include "drm-uapi/panfrost_drm.h"

#include "pan_bo.h"
#include "pan_context.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "pan_util.h"
#include "pandecode/decode.h"
#include "panfrost-quirks.h"

/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any given time; that's what the
 * type field encodes.
 * When the last access is a write, the batch writing the BO might have read
 * dependencies (readers that have not been executed yet and want to read the
 * previous BO content), and when the last access is a read, all readers might
 * depend on another batch to push its results to memory. That's what the
 * readers/writer fields keep track of.
 * There can only be one writer at any given time; if a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        uint32_t type;
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
};

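/* Allocate a fence for a batch and back it with a DRM syncobj, which the
 * kernel signals when the batch's jobs complete. The fence starts out with a
 * single reference, owned by the batch itself. */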
static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;
        ASSERTED int ret;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        assert(fence);
        pipe_reference_init(&fence->reference, 1);
        fence->ctx = batch->ctx;
        fence->batch = batch;
        ret = drmSyncobjCreate(pan_screen(batch->ctx->base.screen)->fd, 0,
                               &fence->syncobj);
        assert(!ret);

        return fence;
}

static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        drmSyncobjDestroy(pan_screen(fence->ctx->base.screen)->fd,
                          fence->syncobj);
        ralloc_free(fence);
}

void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}

void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}

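/* Create a fresh batch for the given framebuffer key. The batch ralloc-owns
 * its BO table and job arrays, so freeing the batch releases them too. */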
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;
        batch->transient_offset = 0;

        util_dynarray_init(&batch->headers, batch);
        util_dynarray_init(&batch->gpu_headers, batch);
        util_dynarray_init(&batch->dependencies, batch);
        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        return batch;
}

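/* Freezing a batch detaches it from the context: it can no longer receive
 * new draws/clears and will only be touched again at submission time. */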
static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches. This way, next draws/clears targeting this FBO will trigger
         * the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        /* If this is the bound batch, the panfrost_context parameters are
         * relevant so submitting it invalidates those parameters, but if it's
         * not bound, the context parameters are for some other batch so we
         * can't invalidate them.
         */
        if (ctx->batch == batch) {
                panfrost_invalidate_frame(ctx);
                ctx->batch = NULL;
        }
}

#ifndef NDEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}
#endif

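/* Release everything a batch holds: its BO references, its dependency fences
 * and its framebuffer state. Only frozen batches may be freed. */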
static void
panfrost_free_batch(struct panfrost_batch *batch)
{
        if (!batch)
                return;

        assert(panfrost_batch_is_frozen(batch));

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of an already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}

#ifndef NDEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}
#endif

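/* Record that 'batch' must execute after the batch guarded by 'newdep'. The
 * dependency takes a fence reference, and the dependee is frozen so no new
 * work can sneak into it afterwards. */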
static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        if (newdep->batch)
                panfrost_freeze_batch(newdep->batch);
}

static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */

        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}

/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we're wallpapering, we special-case to work around
         * u_blitter abuse */

        if (ctx->wallpaper_batch)
                return ctx->wallpaper_batch;

        /* If we already began rendering, use that */

        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}

struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->last_job.gpu)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}

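/* Query (and cache) whether a fence has signaled. An unsubmitted batch can
 * never have signaled; otherwise, ask the kernel with a zero-timeout wait on
 * the syncobj. */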
static bool
panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
{
        if (fence->signaled)
                return true;

        /* Batch has not been submitted yet. */
        if (fence->batch)
                return false;

        int ret = drmSyncobjWait(pan_screen(fence->ctx->base.screen)->fd,
                                 &fence->syncobj, 1, 0, 0, NULL);

        /* Cache whether the fence was signaled */
        fence->signaled = ret >= 0;
        return fence->signaled;
}

static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer && panfrost_batch_fence_is_signaled(access->writer)) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        unsigned nreaders = 0;
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!(*reader))
                        continue;

                if (panfrost_batch_fence_is_signaled(*reader)) {
                        panfrost_batch_fence_unreference(*reader);
                        *reader = NULL;
                } else {
                        nreaders++;
                }
        }

        if (!nreaders)
                util_dynarray_clear(&access->readers);
}

/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer)
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
        }
}

#ifndef NDEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}
#endif

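/* Update the BO access tracking for a new read or write by 'batch'. Four
 * transitions are handled: read -> write (depend on all previous readers and
 * become the sole writer), write -> write (depend on the old writer and
 * replace it), write -> read (depend on the writer and join the readers
 * array), and read -> read (just join the readers array). */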
static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, uint32_t access_type,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        uint32_t old_access_type;
        struct hash_entry *entry;

        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_READ);

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_access_type = access->type;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_access_type to our own access type in that case.
                 */
                old_access_type = access_type;
                access->type = access_type;
        }

        assert(access);

        if (access_type == PAN_BO_ACCESS_WRITE &&
            old_access_type == PAN_BO_ACCESS_READ) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourselves (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                /* We are now the new writer. */
                access->writer = batch->out_sync;
                access->type = access_type;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;
                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (access_type == PAN_BO_ACCESS_WRITE &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to write this BO.
                 * First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (access_type == PAN_BO_ACCESS_READ &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to read this BO.
                 * First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                        access->type = PAN_BO_ACCESS_READ;
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the readers array.
                 */
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);

                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }
}

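/* Attach a BO to a batch: take a reference the first time the batch sees the
 * BO, accumulate its access flags, and feed shared BOs into the cross-batch
 * dependency tracking. */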
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        assert(entry);

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        /* All dependencies should have been flushed before we execute the
         * wallpaper draw, so it should be harmless to skip the
         * update_bo_access() call.
         */
        if (batch == batch->ctx->wallpaper_batch)
                return;

        /* Only pass R/W flags to the dep tracking logic. */
        assert(flags & PAN_BO_ACCESS_RW);
        flags = (flags & PAN_BO_ACCESS_WRITE) ?
                PAN_BO_ACCESS_WRITE : PAN_BO_ACCESS_READ;
        panfrost_batch_update_bo_access(batch, bo, flags, old_flags != 0);
}

void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_bo(batch, rsrc->bo, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_bo(batch, rsrc->bo, flags);
        }
}

struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = panfrost_bo_create(pan_screen(batch->ctx->base.screen), size,
                                create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * panfrost_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);
        return bo;
}

/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to allocate and free the BO directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */
                size = util_next_power_of_two(size);

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->gpu;
}

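/* Lazily allocate the scratchpad (thread-local storage) for a batch. If the
 * batch already has one, re-use it instead of creating a second BO; the
 * cached copy just needs to be large enough for the requested size. */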
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
                              unsigned shift,
                              unsigned thread_tls_alloc,
                              unsigned core_count)
{
        unsigned size = panfrost_get_total_stack_size(shift,
                                                      thread_tls_alloc,
                                                      core_count);

        if (batch->scratchpad) {
                assert(batch->scratchpad->size >= size);
        } else {
                batch->scratchpad = panfrost_batch_create_bo(batch, size,
                                                             PAN_BO_INVISIBLE,
                                                             PAN_BO_ACCESS_PRIVATE |
                                                             PAN_BO_ACCESS_RW |
                                                             PAN_BO_ACCESS_VERTEX_TILER |
                                                             PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->scratchpad;
}

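/* Lazily allocate the tiler heap, the buffer the tiler spills polygon lists
 * into. PAN_BO_GROWABLE presumably lets the kernel back the 16 MiB heap on
 * demand rather than committing it all up front. */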
struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
        if (batch->tiler_heap)
                return batch->tiler_heap;

        batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
                                                     PAN_BO_INVISIBLE |
                                                     PAN_BO_GROWABLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_heap);
        return batch->tiler_heap;
}

struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_screen *screen = pan_screen(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(screen->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}

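/* The "wallpaper" draw reloads the existing framebuffer content at the start
 * of a batch, so that partial rendering (only drawing inside the damage
 * region) does not lose what was already on screen. */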
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        /* Color 0 is cleared, no need to draw the wallpaper.
         * TODO: MRT wallpapers.
         */
        if (batch->clear & PIPE_CLEAR_COLOR0)
                return;

        /* Nothing to reload? TODO: MRT wallpapers */
        if (batch->key.cbufs[0] == NULL)
                return;

        /* No draw calls, and no clear on the depth/stencil bufs.
         * Drawing the wallpaper would be useless.
         */
        if (!batch->last_tiler.gpu &&
            !(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
                return;

        /* Check if the buffer has any content on it worth preserving */

        struct pipe_surface *surf = batch->key.cbufs[0];
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        batch->ctx->wallpaper_batch = batch;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        /* FIXME: Looks like aligning on a tile is not enough, but
         * aligning on twice the tile size seems to work. We don't
         * know exactly what happens here but this deserves extra
         * investigation to figure it out.
         */
        batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                           rsrc->base.width0);
        batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                           rsrc->base.height0);

        struct pipe_scissor_state damage;
        struct pipe_box rects[4];

        /* Clamp the damage box to the rendering area. */
        damage.minx = MAX2(batch->minx, rsrc->damage.biggest_rect.x);
        damage.miny = MAX2(batch->miny, rsrc->damage.biggest_rect.y);
        damage.maxx = MIN2(batch->maxx,
                           rsrc->damage.biggest_rect.x +
                           rsrc->damage.biggest_rect.width);
        damage.maxy = MIN2(batch->maxy,
                           rsrc->damage.biggest_rect.y +
                           rsrc->damage.biggest_rect.height);

        /* One damage rectangle means we can end up with at most 4 reload
         * regions:
         * 1: left region, only exists if damage.x > 0
         * 2: right region, only exists if damage.x + damage.width < fb->width
         * 3: top region, only exists if damage.y > 0. The intersections with
         *    the left and right regions are dropped
         * 4: bottom region, only exists if damage.y + damage.height < fb->height.
         *    The intersections with the left and right regions are dropped
         *
         *  ____________________________
         * |       |     3     |        |
         * |       |___________|        |
         * |       |  damage   |        |
         * |   1   |   rect    |   2    |
         * |       |___________|        |
         * |       |     4     |        |
         * |_______|___________|________|
         */
        u_box_2d(batch->minx, batch->miny, damage.minx - batch->minx,
                 batch->maxy - batch->miny, &rects[0]);
        u_box_2d(damage.maxx, batch->miny, batch->maxx - damage.maxx,
                 batch->maxy - batch->miny, &rects[1]);
        u_box_2d(damage.minx, batch->miny, damage.maxx - damage.minx,
                 damage.miny - batch->miny, &rects[2]);
        u_box_2d(damage.minx, damage.maxy, damage.maxx - damage.minx,
                 batch->maxy - damage.maxy, &rects[3]);

        for (unsigned i = 0; i < 4; i++) {
                /* Width and height are always >= 0 even if width is declared
                 * as a signed integer: the u_box_2d() helper takes unsigned
                 * args and panfrost_set_damage_region() takes care of clamping
                 * negative values.
                 */
                if (!rects[i].width || !rects[i].height)
                        continue;

                /* Blit the wallpaper in */
                panfrost_blit_wallpaper(batch->ctx, &rects[i]);
        }
        batch->ctx->wallpaper_batch = NULL;
}

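/* Build and fire the DRM_IOCTL_PANFROST_SUBMIT ioctl for one job chain:
 * gather in-fences (the batch dependencies, or the vertex/tiler out-fence
 * for a fragment job), collect every BO handle the batch touches, and
 * optionally wait for completion when sync or trace debugging is enabled. */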
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs,
                            struct mali_job_descriptor_header *header)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_screen *screen = pan_screen(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles, *in_syncs = NULL;
        bool is_fragment_shader;
        int ret;

        is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->first_job.gpu;
        if (is_fragment_shader)
                submit.in_sync_count = 1;
        else
                submit.in_sync_count = util_dynarray_num_elements(&batch->dependencies,
                                                                  struct panfrost_batch_fence *);

        if (submit.in_sync_count) {
                in_syncs = calloc(submit.in_sync_count, sizeof(*in_syncs));
                assert(in_syncs);
        }

        /* The fragment job always depends on the vertex/tiler job if there's
         * one
         */
        if (is_fragment_shader) {
                in_syncs[0] = batch->out_sync->syncobj;
        } else {
                unsigned int i = 0;

                util_dynarray_foreach(&batch->dependencies,
                                      struct panfrost_batch_fence *, dep)
                        in_syncs[i++] = (*dep)->syncobj;
        }

        submit.in_syncs = (uintptr_t)in_syncs;
        submit.out_sync = batch->out_sync->syncobj;
        submit.jc = first_job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(batch->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry) {
                struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
                uint32_t flags = (uintptr_t)entry->data;

                assert(bo->gem_handle > 0);
                bo_handles[submit.bo_handle_count++] = bo->gem_handle;

                /* Update the BO access flags so that panfrost_bo_wait() knows
                 * about all pending accesses.
                 * We only keep the READ/WRITE info since this is all the BO
                 * wait logic cares about.
                 * We also preserve existing flags as this batch might not
                 * be the first one to access the BO.
                 */
                bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
        }

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(screen->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        free(in_syncs);

        if (ret) {
                fprintf(stderr, "Error submitting: %m\n");
                return errno;
        }

        if (pan_debug & PAN_DBG_SYNC) {
                u32 status;

                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);

                status = header->exception_status;

                if (status && status != 0x1) {
                        fprintf(stderr, "Job %" PRIx64 " failed: source ID: 0x%x access: %s exception: 0x%x (exception_status 0x%x) fault_pointer 0x%" PRIx64 " \n",
                                first_job_desc,
                                (status >> 16) & 0xFFFF,
                                pandecode_exception_access((status >> 8) & 0x3),
                                status & 0xFF,
                                status,
                                header->fault_pointer);
                }
        }

        /* Trace the job if we're doing that */
        if (pan_debug & PAN_DBG_TRACE) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(screen->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);
                pandecode_jc(submit.jc, FALSE, screen->gpu_id);
        }

        return 0;
}

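/* A batch is submitted as up to two kernel jobs: the vertex/tiler chain (if
 * there are draws) and the fragment job (if there is something to shade or
 * clear), the latter flagged with PANFROST_JD_REQ_FS. */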
static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch)
{
        bool has_draws = batch->first_job.gpu;
        struct mali_job_descriptor_header *header;
        int ret = 0;

        if (has_draws) {
                header = (struct mali_job_descriptor_header *)batch->first_job.cpu;
                ret = panfrost_batch_submit_ioctl(batch, batch->first_job.gpu, 0, header);
                assert(!ret);
        }

        if (batch->first_tiler.gpu || batch->clear) {
                mali_ptr fragjob = panfrost_fragment_job(batch, has_draws, &header);

                ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS, header);
                assert(!ret);
        }

        return ret;
}

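/* Submit a batch: flush its dependencies first, draw the wallpaper, attach
 * the framebuffer descriptor, link the job scoreboard, then hand the jobs to
 * the kernel. The batch is frozen and freed afterwards either way. */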
static void
panfrost_batch_submit(struct panfrost_batch *batch)
{
        assert(batch);

        /* Submit the dependencies first. */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->last_job.gpu && !batch->clear) {
                /* Mark the fence as signaled so the fence logic does not try
                 * to wait on it.
                 */
                batch->out_sync->signaled = true;
                goto out;
        }

        panfrost_batch_draw_wallpaper(batch);

        /* Now that all draws are in, we can finally prepare the
         * FBD for the batch */

        if (batch->framebuffer.gpu && batch->first_job.gpu) {
                struct panfrost_context *ctx = batch->ctx;
                struct pipe_context *gallium = (struct pipe_context *) ctx;
                struct panfrost_screen *screen = pan_screen(gallium->screen);

                if (screen->quirks & MIDGARD_SFBD)
                        panfrost_attach_sfbd(batch, ~0);
                else
                        panfrost_attach_mfbd(batch, ~0);
        }

        panfrost_scoreboard_link_batch(batch);

        ret = panfrost_batch_submit_jobs(batch);

        if (ret)
                fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                struct panfrost_resource *res;

                if (!batch->key.cbufs[i])
                        continue;

                res = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_resource_reset_damage(res);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}

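/* Flush every pending batch. When 'wait' is set, collect each batch's
 * out-fence before submission (submission frees the batch) and block until
 * all of the syncobjs have signaled. */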
void
panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait)
{
        struct util_dynarray fences, syncobjs;

        if (wait) {
                util_dynarray_init(&fences, NULL);
                util_dynarray_init(&syncobjs, NULL);
        }

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;

                assert(batch);

                if (wait) {
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences, struct panfrost_batch_fence *,
                                             batch->out_sync);
                        util_dynarray_append(&syncobjs, uint32_t,
                                             batch->out_sync->syncobj);
                }

                panfrost_batch_submit(batch);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);

        if (!wait)
                return;

        drmSyncobjWait(pan_screen(ctx->base.screen)->fd,
                       util_dynarray_begin(&syncobjs),
                       util_dynarray_num_elements(&syncobjs, uint32_t),
                       INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);

        util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                panfrost_batch_fence_unreference(*fence);

        util_dynarray_fini(&fences);
        util_dynarray_fini(&syncobjs);
}

bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    uint32_t access_type)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        /* It doesn't make any sense to flush only the readers. */
        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_RW);

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access_type & PAN_BO_ACCESS_WRITE && access->writer &&
            access->writer->batch)
                panfrost_batch_submit(access->writer->batch);

        if (!(access_type & PAN_BO_ACCESS_READ))
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch);
        }
}

void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer && ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->depth.writemask)
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
}

/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

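/* Likewise for a 64-bit color: alternate the low and high words across the
 * 128-bit clear value. */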
static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}

static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc)) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) << 8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) << 0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = CLAMP(color->f[0], 0.0, 1.0) * 31.0;
                unsigned g6 = CLAMP(color->f[1], 0.0, 1.0) * 63.0;
                unsigned b5 = CLAMP(color->f[2], 0.0, 1.0) * 31.0;

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* We scale the components against 0xF0 (=240.0), rather than 0xFF */
                unsigned r4 = CLAMP(color->f[0], 0.0, 1.0) * 240.0;
                unsigned g4 = CLAMP(color->f[1], 0.0, 1.0) * 240.0;
                unsigned b4 = CLAMP(color->f[2], 0.0, 1.0) * 240.0;
                unsigned a4 = CLAMP(clear_alpha, 0.0, 1.0) * 240.0;

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 24) | (b4 << 16) | (g4 << 8) | r4);
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = round(CLAMP(color->f[0], 0.0, 1.0) * 31.0);
                unsigned g5 = round(CLAMP(color->f[1], 0.0, 1.0) * 31.0);
                unsigned b5 = round(CLAMP(color->f[2], 0.0, 1.0) * 31.0);
                unsigned a1 = round(CLAMP(clear_alpha, 0.0, 1.0) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Try Gallium's generic default path. Doesn't work for all
                 * formats but it's a good guess. */

                union util_color out;

                if (util_format_is_pure_integer(format)) {
                        memcpy(out.ui, color->ui, 16);
                } else {
                        util_pack_color(color->f, format, &out);
                }

                unsigned size = util_format_get_blocksize(format);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}

void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the state tracker
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}

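/* The FBO -> batch hash table keys on the full pipe_framebuffer_state, so
 * batches are looked up by framebuffer contents rather than by pointer
 * identity. */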
static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}

/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}

/* Are we currently rendering to the screen (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're so early that no framebuffer has been set yet, assume
         * scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}

void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}