/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <assert.h>

#include "drm-uapi/panfrost_drm.h"

#include "pan_bo.h"
#include "pan_context.h"
#include "pan_util.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/rounding.h"
#include "util/u_framebuffer.h"

#include "pandecode/decode.h"
#include "panfrost-quirks.h"
/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any time, that's what the type field
 * reflects.
 * When the last access is a write, the batch writing the BO might have read
 * dependencies (readers that have not been executed yet and want to read the
 * previous BO content), and when the last access is a read, all readers might
 * depend on another batch to push its results to memory. That's what the
 * readers/writers keep track of.
 * There can only be one writer at any given time, if a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        uint32_t type;
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
};
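/* A minimal usage sketch of the tracking above: if batch A reads BO X and
 * batch B later wants to write X, panfrost_batch_update_bo_access() makes B
 * depend on A (through A's out_sync fence), records B as the new writer and
 * drops the readers array, so B is only submitted after A's reads have
 * landed. */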
static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;
        int ret;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        assert(fence);
        pipe_reference_init(&fence->reference, 1);
        fence->ctx = batch->ctx;
        fence->batch = batch;
        ret = drmSyncobjCreate(pan_device(batch->ctx->base.screen)->fd, 0,
                               &fence->syncobj);
        assert(!ret);

        return fence;
}
static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        drmSyncobjDestroy(pan_device(fence->ctx->base.screen)->fd,
                          fence->syncobj);
        ralloc_free(fence);
}
void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}
void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}
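/* Allocate a fresh batch for the given framebuffer state: an empty BO set, an
 * inverted scissor (so the first draw/clear can shrink-wrap it), a new
 * out_sync fence and a transient memory pool. */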
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;

        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen));

        return batch;
}
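/* Freezing a batch means no new draws/clears can target it: it is removed
 * from the FBO -> batch map and detached from the context if it was the bound
 * batch, so the next draw hitting the same FBO gets a fresh batch. */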
static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches. This way, next draws/clears targeting this FBO will trigger
         * the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        /* If this is the bound batch, the panfrost_context parameters are
         * relevant so submitting it invalidates those parameters, but if it's
         * not bound, the context parameters are for some other batch so we
         * can't invalidate them.
         */
        if (ctx->batch == batch) {
                panfrost_invalidate_frame(ctx);
                ctx->batch = NULL;
        }
}
#ifdef PAN_BATCH_DEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}
#endif
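/* Release everything a batch owns: its BO references (including the pool's
 * BOs), its dependency fences, its out_sync fence and the framebuffer state
 * key. */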
static void
panfrost_free_batch(struct panfrost_batch *batch)
{
        if (!batch)
                return;

#ifdef PAN_BATCH_DEBUG
        assert(panfrost_batch_is_frozen(batch));
#endif

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of an already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}
#ifdef PAN_BATCH_DEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}
#endif
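/* Make 'batch' depend on the batch guarded by 'newdep'. The dependency is only
 * recorded once, checked for cycles in debug builds, and the dependee is
 * frozen so it cannot grow once someone depends on it. */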
static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

#ifdef PAN_BATCH_DEBUG
        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
#endif

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        if (newdep->batch)
                panfrost_freeze_batch(newdep->batch);
}
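/* Return the batch attached to this framebuffer state, creating and caching a
 * new one if none exists yet. */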
static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */

        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}
/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we're wallpapering, return the wallpaper batch directly so the
         * reload blit lands in the right job (see
         * panfrost_batch_draw_wallpaper).
         */
        if (ctx->wallpaper_batch)
                return ctx->wallpaper_batch;

        /* If we already began rendering, use that */

        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}
struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->first_job)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}
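/* A fence counts as signaled once its batch has been submitted (fence->batch
 * is NULL) and a zero-timeout syncobj wait succeeds; success is cached in
 * fence->signaled so we do not keep asking the kernel. */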
static bool
panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
{
        if (fence->signaled)
                return true;

        /* Batch has not been submitted yet. */
        if (fence->batch)
                return false;

        int ret = drmSyncobjWait(pan_device(fence->ctx->base.screen)->fd,
                                 &fence->syncobj, 1, 0, 0, NULL);

        /* Cache whether the fence was signaled */
        fence->signaled = ret >= 0;
        return fence->signaled;
}
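/* Drop references to fences that already signaled, so a BO's access entry only
 * keeps the writer/readers that are still in flight. */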
static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer && panfrost_batch_fence_is_signaled(access->writer)) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
        struct panfrost_batch_fence **new_readers = readers_array;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!(*reader))
                        continue;

                if (panfrost_batch_fence_is_signaled(*reader)) {
                        panfrost_batch_fence_unreference(*reader);
                        *reader = NULL;
                } else {
                        /* Build a new array of only unsignaled fences in-place */
                        *(new_readers++) = *reader;
                }
        }

        if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
                                  new_readers - readers_array) &&
            new_readers != readers_array)
                unreachable("Invalid dynarray access->readers");
}
/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer) {
                        ralloc_free(access);
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
                }
        }
}
#ifdef PAN_BATCH_DEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}
#endif
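/* Core of the dependency tracking: given the new access type (read or write)
 * and the last recorded access to this BO, add the batch-level dependencies
 * described at the top of this file and update the writer/readers
 * bookkeeping. */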
static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, uint32_t access_type,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        uint32_t old_access_type;
        struct hash_entry *entry;

        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_READ);

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_access_type = access->type;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_access_type to our own access type in that case.
                 */
                old_access_type = access_type;
                access->type = access_type;
        }

        if (access_type == PAN_BO_ACCESS_WRITE &&
            old_access_type == PAN_BO_ACCESS_READ) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourself (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                /* We now are the new writer. */
                access->writer = batch->out_sync;
                access->type = access_type;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;

                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (access_type == PAN_BO_ACCESS_WRITE &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to write this BO.
                 * First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (access_type == PAN_BO_ACCESS_READ &&
                   old_access_type == PAN_BO_ACCESS_WRITE) {
                /* Previous access was a write and we want to read this BO.
                 * First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                        access->type = PAN_BO_ACCESS_READ;
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the reader array.
                 */
#ifdef PAN_BATCH_DEBUG
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }
#endif

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);

                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }
}
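/* Attach a BO to a batch: take a reference the first time the batch sees it,
 * merge the access flags, and feed shared BOs into the dependency tracking
 * logic. */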
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        /* All dependencies should have been flushed before we execute the
         * wallpaper draw, so it should be harmless to skip the
         * update_bo_access() call.
         */
        if (batch == batch->ctx->wallpaper_batch)
                return;

        /* Only pass R/W flags to the dep tracking logic. */
        assert(flags & PAN_BO_ACCESS_RW);
        flags = (flags & PAN_BO_ACCESS_WRITE) ?
                PAN_BO_ACCESS_WRITE : PAN_BO_ACCESS_READ;
        panfrost_batch_update_bo_access(batch, bo, flags, old_flags != 0);
}
void
panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
                                struct panfrost_resource *rsrc,
                                uint32_t flags)
{
        panfrost_batch_add_bo(batch, rsrc->bo, flags);

        for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
                if (rsrc->slices[i].checksum_bo)
                        panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);

        if (rsrc->separate_stencil)
                panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
}
void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }
}
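/* Convenience wrapper: create a BO, attach it to the batch, and transfer
 * ownership so the BO is released when the batch is destroyed. */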
struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = pan_bo_create(pan_device(batch->ctx->base.screen), size,
                           create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * pan_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);

        return bo;
}
/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to allocate/free BOs directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */
                size = util_next_power_of_two(size);

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->gpu;
}
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
                              unsigned shift,
                              unsigned thread_tls_alloc,
                              unsigned core_count)
{
        unsigned size = panfrost_get_total_stack_size(shift,
                                                      thread_tls_alloc,
                                                      core_count);

        if (batch->scratchpad) {
                assert(batch->scratchpad->size >= size);
        } else {
                batch->scratchpad = panfrost_batch_create_bo(batch, size,
                                                             PAN_BO_INVISIBLE,
                                                             PAN_BO_ACCESS_PRIVATE |
                                                             PAN_BO_ACCESS_RW |
                                                             PAN_BO_ACCESS_VERTEX_TILER |
                                                             PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->scratchpad;
}
struct panfrost_bo *
panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
                                 unsigned size,
                                 unsigned workgroup_count)
{
        if (batch->shared_memory) {
                assert(batch->shared_memory->size >= size);
        } else {
                batch->shared_memory = panfrost_batch_create_bo(batch, size,
                                                                PAN_BO_INVISIBLE,
                                                                PAN_BO_ACCESS_PRIVATE |
                                                                PAN_BO_ACCESS_RW |
                                                                PAN_BO_ACCESS_VERTEX_TILER);
        }

        return batch->shared_memory;
}
struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
        if (batch->tiler_heap)
                return batch->tiler_heap;

        batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
                                                     PAN_BO_INVISIBLE |
                                                     PAN_BO_GROWABLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_heap);
        return batch->tiler_heap;
}
mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
{
        if (!vertex_count)
                return 0;

        if (batch->tiler_meta)
                return batch->tiler_meta;

        struct panfrost_bo *tiler_heap;
        tiler_heap = panfrost_batch_get_tiler_heap(batch);

        struct bifrost_tiler_heap_meta tiler_heap_meta = {
                .heap_size = tiler_heap->size,
                .tiler_heap_start = tiler_heap->gpu,
                .tiler_heap_free = tiler_heap->gpu,
                .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
                .unk7e007e = 0x7e007e,
        };

        struct bifrost_tiler_meta tiler_meta = {
                .hierarchy_mask = 0x28,
                .width = MALI_POSITIVE(batch->key.width),
                .height = MALI_POSITIVE(batch->key.height),
                .tiler_heap_meta = panfrost_upload_transient(batch, &tiler_heap_meta, sizeof(tiler_heap_meta)),
        };

        batch->tiler_meta = panfrost_upload_transient(batch, &tiler_meta, sizeof(tiler_meta));
        return batch->tiler_meta;
}
struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}
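/* The "wallpaper" draw blits the previous framebuffer contents back in when
 * color 0 is not cleared, restricted to the parts of the render area that lie
 * outside the biggest damage rectangle. */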
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        /* Color 0 is cleared, no need to draw the wallpaper.
         * TODO: MRT wallpapers.
         */
        if (batch->clear & PIPE_CLEAR_COLOR0)
                return;

        /* Nothing to reload? TODO: MRT wallpapers */
        if (batch->key.cbufs[0] == NULL)
                return;

        /* No draw calls, and no clear on the depth/stencil bufs.
         * Drawing the wallpaper would be useless.
         */
        if (!batch->tiler_dep &&
            !(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
                return;

        /* Check if the buffer has any content on it worth preserving */

        struct pipe_surface *surf = batch->key.cbufs[0];
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        batch->ctx->wallpaper_batch = batch;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        /* FIXME: Looks like aligning on a tile is not enough, but
         * aligning on twice the tile size seems to work. We don't
         * know exactly what happens here but this deserves extra
         * investigation to figure it out.
         */
        batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                           rsrc->base.width0);
        batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                           rsrc->base.height0);

        struct pipe_scissor_state damage;
        struct pipe_box rects[4];

        /* Clamp the damage box to the rendering area. */
        damage.minx = MAX2(batch->minx, rsrc->damage.biggest_rect.x);
        damage.miny = MAX2(batch->miny, rsrc->damage.biggest_rect.y);
        damage.maxx = MIN2(batch->maxx,
                           rsrc->damage.biggest_rect.x +
                           rsrc->damage.biggest_rect.width);
        damage.maxx = MAX2(damage.maxx, damage.minx);
        damage.maxy = MIN2(batch->maxy,
                           rsrc->damage.biggest_rect.y +
                           rsrc->damage.biggest_rect.height);
        damage.maxy = MAX2(damage.maxy, damage.miny);

        /* One damage rectangle means we can end up with at most 4 reload
         * regions:
         * 1: left region, only exists if damage.x > 0
         * 2: right region, only exists if damage.x + damage.width < fb->width
         * 3: top region, only exists if damage.y > 0. The intersection with
         *    the left and right regions are dropped
         * 4: bottom region, only exists if damage.y + damage.height < fb->height.
         *    The intersection with the left and right regions are dropped
         *
         *  ____________________________
         * |       |     3     |        |
         * |       |___________|        |
         * |       |   damage  |        |
         * |   1   |    rect   |   2    |
         * |       |___________|        |
         * |       |     4     |        |
         * |_______|___________|________|
         */
        u_box_2d(batch->minx, batch->miny, damage.minx - batch->minx,
                 batch->maxy - batch->miny, &rects[0]);
        u_box_2d(damage.maxx, batch->miny, batch->maxx - damage.maxx,
                 batch->maxy - batch->miny, &rects[1]);
        u_box_2d(damage.minx, batch->miny, damage.maxx - damage.minx,
                 damage.miny - batch->miny, &rects[2]);
        u_box_2d(damage.minx, damage.maxy, damage.maxx - damage.minx,
                 batch->maxy - damage.maxy, &rects[3]);

        for (unsigned i = 0; i < 4; i++) {
                /* Width and height are always >= 0 even if width is declared as a
                 * signed integer: u_box_2d() helper takes unsigned args and
                 * panfrost_set_damage_region() is taking care of clamping
                 * negative values.
                 */
                if (!rects[i].width || !rects[i].height)
                        continue;

                /* Blit the wallpaper in */
                panfrost_blit_wallpaper(batch->ctx, &rects[i]);
        }
        batch->ctx->wallpaper_batch = NULL;
}
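/* Record one BO in the submit ioctl's handle array and publish its pending
 * GPU access so panfrost_bo_wait() knows what to wait for. */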
static void
panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
        uint32_t flags = (uintptr_t)entry->data;

        assert(bo->gem_handle > 0);
        bo_handles[idx] = bo->gem_handle;

        /* Update the BO access flags so that panfrost_bo_wait() knows
         * about all pending accesses.
         * We only keep the READ/WRITE info since this is all the BO
         * wait logic cares about.
         * We also preserve existing flags as this batch might not
         * be the first one to access the BO.
         */
        bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
}
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles, *in_syncs = NULL;
        bool is_fragment_shader;
        int ret;

        is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->first_job;
        if (is_fragment_shader)
                submit.in_sync_count = 1;
        else
                submit.in_sync_count = util_dynarray_num_elements(&batch->dependencies,
                                                                  struct panfrost_batch_fence *);

        if (submit.in_sync_count) {
                in_syncs = calloc(submit.in_sync_count, sizeof(*in_syncs));
                assert(in_syncs);
        }

        /* The fragment job always depends on the vertex/tiler job if there's
         * one.
         */
        if (is_fragment_shader) {
                in_syncs[0] = batch->out_sync->syncobj;
        } else {
                unsigned int i = 0;

                util_dynarray_foreach(&batch->dependencies,
                                      struct panfrost_batch_fence *, dep)
                        in_syncs[i++] = (*dep)->syncobj;
        }

        submit.in_syncs = (uintptr_t)in_syncs;
        submit.out_sync = batch->out_sync->syncobj;
        submit.jc = first_job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        free(in_syncs);

        if (ret) {
                DBG("Error submitting: %m\n");
                return errno;
        }

        /* Trace the job if we're doing that */
        if (pan_debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(dev->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);

                /* Trace gets priority over sync */
                bool minimal = !(pan_debug & PAN_DBG_TRACE);
                pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
        }

        return 0;
}
static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch)
{
        bool has_draws = batch->first_job;
        int ret = 0;

        if (has_draws) {
                ret = panfrost_batch_submit_ioctl(batch, batch->first_job, 0);
                assert(!ret);
        }

        if (batch->tiler_dep || batch->clear) {
                mali_ptr fragjob = panfrost_fragment_job(batch, has_draws);
                ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS);
                assert(!ret);
        }

        return ret;
}
static void
panfrost_batch_submit(struct panfrost_batch *batch)
{
        assert(batch);

        /* Submit the dependencies first. */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->first_job && !batch->clear) {
                /* Mark the fence as signaled so the fence logic does not try
                 * to wait on it.
                 */
                batch->out_sync->signaled = true;
                goto out;
        }

        panfrost_batch_draw_wallpaper(batch);

        /* Now that all draws are in, we can finally prepare the
         * FBD for the batch */

        if (batch->framebuffer.gpu && batch->first_job) {
                struct panfrost_context *ctx = batch->ctx;
                struct pipe_context *gallium = (struct pipe_context *) ctx;
                struct panfrost_device *dev = pan_device(gallium->screen);

                if (dev->quirks & MIDGARD_SFBD)
                        panfrost_attach_sfbd(batch, ~0);
                else
                        panfrost_attach_mfbd(batch, ~0);
        }

        panfrost_scoreboard_initialize_tiler(batch);

        ret = panfrost_batch_submit_jobs(batch);

        if (ret)
                DBG("panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                struct panfrost_resource *res;

                if (!batch->key.cbufs[i])
                        continue;

                res = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_resource_reset_damage(res);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}
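/* Flush every pending batch of this context, optionally blocking until all
 * their out_sync syncobjs have signaled, then garbage-collect signaled
 * fences. */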
void
panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait)
{
        struct util_dynarray fences, syncobjs;

        if (wait) {
                util_dynarray_init(&fences, NULL);
                util_dynarray_init(&syncobjs, NULL);
        }

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;

                assert(batch);

                if (wait) {
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences, struct panfrost_batch_fence *,
                                             batch->out_sync);
                        util_dynarray_append(&syncobjs, uint32_t,
                                             batch->out_sync->syncobj);
                }

                panfrost_batch_submit(batch);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);

        if (!wait)
                return;

        drmSyncobjWait(pan_device(ctx->base.screen)->fd,
                       util_dynarray_begin(&syncobjs),
                       util_dynarray_num_elements(&syncobjs, uint32_t),
                       INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);

        util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                panfrost_batch_fence_unreference(*fence);

        util_dynarray_fini(&fences);
        util_dynarray_fini(&syncobjs);
}
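/* Return true if any not-yet-submitted batch still reads or writes this BO. */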
bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}
void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    uint32_t access_type)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        /* It doesn't make any sense to flush only the readers. */
        assert(access_type == PAN_BO_ACCESS_WRITE ||
               access_type == PAN_BO_ACCESS_RW);

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access_type & PAN_BO_ACCESS_WRITE && access->writer &&
            access->writer->batch)
                panfrost_batch_submit(access->writer->batch);

        if (!(access_type & PAN_BO_ACCESS_READ))
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch);
        }
}
void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer && ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->depth.writemask)
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
}
void
panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_state *ss;

                ss = panfrost_get_shader_state(ctx, i);
                if (!ss)
                        continue;

                batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
        }
}
/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}
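/* Pack a clear colour into the 128-bit value the hardware expects for the
 * given framebuffer format. As a quick example (a sketch, not an exhaustive
 * description): clearing an RGBA8 target to opaque red (1, 0, 0, 1) puts
 * 0xff0000ff in each of the four 32-bit words. */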
static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc)) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) <<  8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) <<  0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* Convert to 4-bits */
                unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
                unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
                unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
                unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
                unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Otherwise, it's generic subject to replication */

                union util_color out = { 0 };
                unsigned size = util_format_get_blocksize(format);

                util_pack_color(color->f, format, &out);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 6)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}
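/* Record a clear on the batch: pack the clear colour per render target, stash
 * the depth/stencil values, and grow the batch scissor to the full framebuffer
 * since the Gallium clear callback always clears everything. */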
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the gallium frontend
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}
static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}
/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}
void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}
/* Are we currently rendering to the display (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're so early that no framebuffer was sent, it's scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}