/*
 * Copyright (C) 2019 Alyssa Rosenzweig
 * Copyright (C) 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <assert.h>

#include "drm-uapi/panfrost_drm.h"

#include "pan_bo.h"
#include "pan_context.h"
#include "util/hash_table.h"
#include "util/ralloc.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/rounding.h"
#include "pan_util.h"
#include "pandecode/decode.h"
#include "panfrost-quirks.h"
/* panfrost_bo_access is here to help us keep track of batch accesses to BOs
 * and build a proper dependency graph such that batches can be pipelined for
 * better GPU utilization.
 *
 * Each accessed BO has a corresponding entry in the ->accessed_bos hash table.
 * A BO is either being written or read at any time (see if writer != NULL).
 * When the last access is a write, the batch writing the BO might have read
 * dependencies (readers that have not been executed yet and want to read the
 * previous BO content), and when the last access is a read, all readers might
 * depend on another batch to push its results to memory. That's what the
 * readers/writers keep track of.
 *
 * There can only be one writer at any given time. If a new batch wants to
 * write to the same BO, a dependency will be added between the new writer and
 * the old writer (at the batch level), and panfrost_bo_access->writer will be
 * updated to point to the new writer.
 */
struct panfrost_bo_access {
        struct util_dynarray readers;
        struct panfrost_batch_fence *writer;
};
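/* Create the fence backing a batch. The fence owns a DRM syncobj that the
 * kernel signals when the batch completes, so other batches (and fence waits
 * from the winsys) can synchronize on it. */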
static struct panfrost_batch_fence *
panfrost_create_batch_fence(struct panfrost_batch *batch)
{
        struct panfrost_batch_fence *fence;
        int ret;

        fence = rzalloc(NULL, struct panfrost_batch_fence);
        assert(fence);
        pipe_reference_init(&fence->reference, 1);
        fence->ctx = batch->ctx;
        fence->batch = batch;
        ret = drmSyncobjCreate(pan_device(batch->ctx->base.screen)->fd, 0,
                               &fence->syncobj);
        assert(!ret);

        return fence;
}
static void
panfrost_free_batch_fence(struct panfrost_batch_fence *fence)
{
        drmSyncobjDestroy(pan_device(fence->ctx->base.screen)->fd,
                          fence->syncobj);
        ralloc_free(fence);
}
void
panfrost_batch_fence_unreference(struct panfrost_batch_fence *fence)
{
        if (pipe_reference(&fence->reference, NULL))
                panfrost_free_batch_fence(fence);
}
void
panfrost_batch_fence_reference(struct panfrost_batch_fence *fence)
{
        pipe_reference(NULL, &fence->reference);
}
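/* Allocate a fresh batch keyed on a framebuffer state: an empty BO table, an
 * inverted (empty) scissor, its own out_sync fence and a transient memory
 * pool. */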
static struct panfrost_batch *
panfrost_create_batch(struct panfrost_context *ctx,
                      const struct pipe_framebuffer_state *key)
{
        struct panfrost_batch *batch = rzalloc(ctx, struct panfrost_batch);

        batch->ctx = ctx;

        batch->bos = _mesa_hash_table_create(batch, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

        batch->minx = batch->miny = ~0;
        batch->maxx = batch->maxy = 0;

        batch->out_sync = panfrost_create_batch_fence(batch);
        util_copy_framebuffer_state(&batch->key, key);

        batch->pool = panfrost_create_pool(batch, pan_device(ctx->base.screen));

        return batch;
}
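/* Freeze a batch: detach it from the FBO -> batch table and from the context
 * so that subsequent draws/clears targeting the same framebuffer start a new
 * batch instead of appending to this one. */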
static void
panfrost_freeze_batch(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        /* Remove the entry in the FBO -> batch hash table if the batch
         * matches. This way, next draws/clears targeting this FBO will trigger
         * the creation of a new batch.
         */
        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                _mesa_hash_table_remove(ctx->batches, entry);

        /* If this is the bound batch, the panfrost_context parameters are
         * relevant so submitting it invalidates those parameters, but if it's
         * not bound, the context parameters are for some other batch so we
         * can't invalidate them.
         */
        if (ctx->batch == batch) {
                panfrost_invalidate_frame(ctx);
                ctx->batch = NULL;
        }
}
#ifdef PAN_BATCH_DEBUG
static bool panfrost_batch_is_frozen(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->batches, &batch->key);
        if (entry && entry->data == batch)
                return false;

        if (ctx->batch == batch)
                return false;

        return true;
}
#endif
static void
panfrost_free_batch(struct panfrost_batch *batch)
{
        if (!batch)
                return;

#ifdef PAN_BATCH_DEBUG
        assert(panfrost_batch_is_frozen(batch));
#endif

        hash_table_foreach(batch->bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_bo_unreference((struct panfrost_bo *)entry->key);

        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                panfrost_batch_fence_unreference(*dep);
        }

        /* The out_sync fence lifetime is different from the batch one
         * since other batches might want to wait on a fence of an already
         * submitted/signaled batch. All we need to do here is make sure the
         * fence does not point to an invalid batch, which the core will
         * interpret as 'batch is already submitted'.
         */
        batch->out_sync->batch = NULL;
        panfrost_batch_fence_unreference(batch->out_sync);

        util_unreference_framebuffer_state(&batch->key);
        ralloc_free(batch);
}
#ifdef PAN_BATCH_DEBUG
static bool
panfrost_dep_graph_contains_batch(struct panfrost_batch *root,
                                  struct panfrost_batch *batch)
{
        if (!root)
                return false;

        util_dynarray_foreach(&root->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch == batch ||
                    panfrost_dep_graph_contains_batch((*dep)->batch, batch))
                        return true;
        }

        return false;
}
#endif
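/* Add newdep as a dependency of batch, skipping self-dependencies and
 * duplicates, and freeze the dependency's batch so no new work gets queued on
 * it after we started depending on its results. */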
static void
panfrost_batch_add_dep(struct panfrost_batch *batch,
                       struct panfrost_batch_fence *newdep)
{
        if (batch == newdep->batch)
                return;

        /* We might want to turn ->dependencies into a set if the number of
         * deps turns out to be big enough to make this 'is dep already there'
         * search inefficient.
         */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if (*dep == newdep)
                        return;
        }

#ifdef PAN_BATCH_DEBUG
        /* Make sure the dependency graph is acyclic. */
        assert(!panfrost_dep_graph_contains_batch(newdep->batch, batch));
#endif

        panfrost_batch_fence_reference(newdep);
        util_dynarray_append(&batch->dependencies,
                             struct panfrost_batch_fence *, newdep);

        /* We now have a batch depending on us, let's make sure new draw/clear
         * calls targeting the same FBO use a new batch object.
         */
        if (newdep->batch)
                panfrost_freeze_batch(newdep->batch);
}
static struct panfrost_batch *
panfrost_get_batch(struct panfrost_context *ctx,
                   const struct pipe_framebuffer_state *key)
{
        /* Lookup the job first */
        struct hash_entry *entry = _mesa_hash_table_search(ctx->batches, key);

        if (entry)
                return entry->data;

        /* Otherwise, let's create a job */
        struct panfrost_batch *batch = panfrost_create_batch(ctx, key);

        /* Save the created job */
        _mesa_hash_table_insert(ctx->batches, &batch->key, batch);

        return batch;
}
/* Get the job corresponding to the FBO we're currently rendering into */

struct panfrost_batch *
panfrost_get_batch_for_fbo(struct panfrost_context *ctx)
{
        /* If we're wallpapering, we special case to workaround
         * u_blitter abuse */
        if (ctx->wallpaper_batch)
                return ctx->wallpaper_batch;

        /* If we already began rendering, use that */
        if (ctx->batch) {
                assert(util_framebuffer_state_equal(&ctx->batch->key,
                                                    &ctx->pipe_framebuffer));
                return ctx->batch;
        }

        /* If not, look up the job */
        struct panfrost_batch *batch = panfrost_get_batch(ctx,
                                                          &ctx->pipe_framebuffer);

        /* Set this job as the current FBO job. Will be reset when updating the
         * FB state and when submitting or releasing a job.
         */
        ctx->batch = batch;
        return batch;
}
struct panfrost_batch *
panfrost_get_fresh_batch_for_fbo(struct panfrost_context *ctx)
{
        struct panfrost_batch *batch;

        batch = panfrost_get_batch(ctx, &ctx->pipe_framebuffer);

        /* The batch has no draw/clear queued, let's return it directly.
         * Note that it's perfectly fine to re-use a batch with an
         * existing clear, we'll just update it with the new clear request.
         */
        if (!batch->scoreboard.first_job)
                return batch;

        /* Otherwise, we need to freeze the existing one and instantiate a new
         * one.
         */
        panfrost_freeze_batch(batch);
        return panfrost_get_batch(ctx, &ctx->pipe_framebuffer);
}
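/* Check whether a fence has signaled without blocking (zero timeout), caching
 * the answer so we only have to ask the kernel once. A fence still attached to
 * an unsubmitted batch cannot have signaled yet. */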
static bool
panfrost_batch_fence_is_signaled(struct panfrost_batch_fence *fence)
{
        if (fence->signaled)
                return true;

        /* Batch has not been submitted yet. */
        if (fence->batch)
                return false;

        int ret = drmSyncobjWait(pan_device(fence->ctx->base.screen)->fd,
                                 &fence->syncobj, 1, 0, 0, NULL);

        /* Cache whether the fence was signaled */
        fence->signaled = ret >= 0;
        return fence->signaled;
}
static void
panfrost_bo_access_gc_fences(struct panfrost_context *ctx,
                             struct panfrost_bo_access *access,
                             const struct panfrost_bo *bo)
{
        if (access->writer && panfrost_batch_fence_is_signaled(access->writer)) {
                panfrost_batch_fence_unreference(access->writer);
                access->writer = NULL;
        }

        struct panfrost_batch_fence **readers_array = util_dynarray_begin(&access->readers);
        struct panfrost_batch_fence **new_readers = readers_array;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (!(*reader))
                        continue;

                if (panfrost_batch_fence_is_signaled(*reader)) {
                        panfrost_batch_fence_unreference(*reader);
                        *reader = NULL;
                } else {
                        /* Build a new array of only unsignaled fences in-place */
                        *(new_readers++) = *reader;
                }
        }

        if (!util_dynarray_resize(&access->readers, struct panfrost_batch_fence *,
                                  new_readers - readers_array) &&
            new_readers != readers_array)
                unreachable("Invalid dynarray access->readers");
}
/* Collect signaled fences to keep the kernel-side syncobj-map small. The
 * idea is to collect those signaled fences at the end of each flush_all
 * call. This function is likely to collect only fences from previous
 * batch flushes, not the ones that have just been submitted and are
 * probably still in flight when we trigger the garbage collection.
 * Anyway, we need to do this garbage collection at some point if we don't
 * want the BO access map to keep invalid entries around and retain
 * syncobjs forever.
 */
static void
panfrost_gc_fences(struct panfrost_context *ctx)
{
        hash_table_foreach(ctx->accessed_bos, entry) {
                struct panfrost_bo_access *access = entry->data;

                assert(access);
                panfrost_bo_access_gc_fences(ctx, access, entry->key);
                if (!util_dynarray_num_elements(&access->readers,
                                                struct panfrost_batch_fence *) &&
                    !access->writer) {
                        ralloc_free(access);
                        _mesa_hash_table_remove(ctx->accessed_bos, entry);
                }
        }
}
#ifdef PAN_BATCH_DEBUG
static bool
panfrost_batch_in_readers(struct panfrost_batch *batch,
                          struct panfrost_bo_access *access)
{
        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch == batch)
                        return true;
        }

        return false;
}
#endif
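/* Core of the BO dependency tracking described at the top of this file: given
 * a new read or write access to a BO from a batch, add the required
 * batch-level dependencies and update the per-BO readers array and writer
 * pointer accordingly. */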
static void
panfrost_batch_update_bo_access(struct panfrost_batch *batch,
                                struct panfrost_bo *bo, bool writes,
                                bool already_accessed)
{
        struct panfrost_context *ctx = batch->ctx;
        struct panfrost_bo_access *access;
        bool old_writes = false;
        struct hash_entry *entry;

        entry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = entry ? entry->data : NULL;
        if (access) {
                old_writes = access->writer != NULL;
        } else {
                access = rzalloc(ctx, struct panfrost_bo_access);
                util_dynarray_init(&access->readers, access);
                _mesa_hash_table_insert(ctx->accessed_bos, bo, access);
                /* We are the first to access this BO, let's initialize
                 * old_writes to our own access type in that case.
                 */
                old_writes = writes;
        }

        assert(access);

        if (writes && !old_writes) {
                /* Previous access was a read and we want to write this BO.
                 * We first need to add explicit deps between our batch and
                 * the previous readers.
                 */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *, reader) {
                        /* We were already reading the BO, no need to add a dep
                         * on ourself (the acyclic check would complain about
                         * that).
                         */
                        if (!(*reader) || (*reader)->batch == batch)
                                continue;

                        panfrost_batch_add_dep(batch, *reader);
                }
                panfrost_batch_fence_reference(batch->out_sync);

                if (access->writer)
                        panfrost_batch_fence_unreference(access->writer);

                /* We now are the new writer. */
                access->writer = batch->out_sync;

                /* Release the previous readers and reset the readers array. */
                util_dynarray_foreach(&access->readers,
                                      struct panfrost_batch_fence *,
                                      reader) {
                        if (!*reader)
                                continue;

                        panfrost_batch_fence_unreference(*reader);
                }

                util_dynarray_clear(&access->readers);
        } else if (writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * there's nothing to do. Otherwise we need to add a
                 * dependency between the new writer and the old one.
                 */
                if (access->writer != batch->out_sync) {
                        if (access->writer) {
                                panfrost_batch_add_dep(batch, access->writer);
                                panfrost_batch_fence_unreference(access->writer);
                        }
                        panfrost_batch_fence_reference(batch->out_sync);
                        access->writer = batch->out_sync;
                }
        } else if (!writes && old_writes) {
                /* First check if we were the previous writer, in that case
                 * we want to keep the access type unchanged, as a write is
                 * more constraining than a read.
                 */
                if (access->writer != batch->out_sync) {
                        /* Add a dependency on the previous writer. */
                        panfrost_batch_add_dep(batch, access->writer);

                        /* The previous access was a write, there's no reason
                         * to have entries in the readers array.
                         */
                        assert(!util_dynarray_num_elements(&access->readers,
                                                           struct panfrost_batch_fence *));

                        /* Add ourselves to the readers array. */
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&access->readers,
                                             struct panfrost_batch_fence *,
                                             batch->out_sync);
                        access->writer = NULL;
                }
        } else {
                /* We already accessed this BO before, so we should already be
                 * in the reader array.
                 */
#ifdef PAN_BATCH_DEBUG
                if (already_accessed) {
                        assert(panfrost_batch_in_readers(batch, access));
                        return;
                }
#endif

                /* Previous access was a read and we want to read this BO.
                 * Add ourselves to the readers array and add a dependency on
                 * the previous writer if any.
                 */
                panfrost_batch_fence_reference(batch->out_sync);
                util_dynarray_append(&access->readers,
                                     struct panfrost_batch_fence *,
                                     batch->out_sync);
                if (access->writer)
                        panfrost_batch_add_dep(batch, access->writer);
        }
}
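/* Attach a BO to a batch: take a reference the first time the batch sees it,
 * merge the access flags, and update the dependency tracking for shared BOs. */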
void
panfrost_batch_add_bo(struct panfrost_batch *batch, struct panfrost_bo *bo,
                      uint32_t flags)
{
        if (!bo)
                return;

        struct hash_entry *entry;
        uint32_t old_flags = 0;

        entry = _mesa_hash_table_search(batch->bos, bo);
        if (!entry) {
                entry = _mesa_hash_table_insert(batch->bos, bo,
                                                (void *)(uintptr_t)flags);
                panfrost_bo_reference(bo);
        } else {
                old_flags = (uintptr_t)entry->data;

                /* All batches have to agree on the shared flag. */
                assert((old_flags & PAN_BO_ACCESS_SHARED) ==
                       (flags & PAN_BO_ACCESS_SHARED));
        }

        assert(entry);

        if (old_flags == flags)
                return;

        flags |= old_flags;
        entry->data = (void *)(uintptr_t)flags;

        /* If this is not a shared BO, we don't really care about dependency
         * tracking.
         */
        if (!(flags & PAN_BO_ACCESS_SHARED))
                return;

        /* All dependencies should have been flushed before we execute the
         * wallpaper draw, so it should be harmless to skip the
         * update_bo_access() call.
         */
        if (batch == batch->ctx->wallpaper_batch)
                return;

        assert(flags & PAN_BO_ACCESS_RW);
        panfrost_batch_update_bo_access(batch, bo, flags & PAN_BO_ACCESS_WRITE,
                                        old_flags != 0);
}
static void
panfrost_batch_add_resource_bos(struct panfrost_batch *batch,
                                struct panfrost_resource *rsrc,
                                uint32_t flags)
{
        panfrost_batch_add_bo(batch, rsrc->bo, flags);

        for (unsigned i = 0; i < MAX_MIP_LEVELS; i++)
                if (rsrc->slices[i].checksum_bo)
                        panfrost_batch_add_bo(batch, rsrc->slices[i].checksum_bo, flags);

        if (rsrc->separate_stencil)
                panfrost_batch_add_bo(batch, rsrc->separate_stencil->bo, flags);
}
void panfrost_batch_add_fbo_bos(struct panfrost_batch *batch)
{
        uint32_t flags = PAN_BO_ACCESS_SHARED | PAN_BO_ACCESS_WRITE |
                         PAN_BO_ACCESS_VERTEX_TILER |
                         PAN_BO_ACCESS_FRAGMENT;

        for (unsigned i = 0; i < batch->key.nr_cbufs; ++i) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }

        if (batch->key.zsbuf) {
                struct panfrost_resource *rsrc = pan_resource(batch->key.zsbuf->texture);
                panfrost_batch_add_resource_bos(batch, rsrc, flags);
        }
}
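/* Convenience wrapper: create a BO, add it to the batch, then drop the extra
 * reference so the BO is released along with the batch (unless someone else
 * retains it in the meantime). */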
struct panfrost_bo *
panfrost_batch_create_bo(struct panfrost_batch *batch, size_t size,
                         uint32_t create_flags, uint32_t access_flags)
{
        struct panfrost_bo *bo;

        bo = panfrost_bo_create(pan_device(batch->ctx->base.screen), size,
                                create_flags);
        panfrost_batch_add_bo(batch, bo, access_flags);

        /* panfrost_batch_add_bo() has retained a reference and
         * panfrost_bo_create() initializes the refcnt to 1, so let's
         * unreference the BO here so it gets released when the batch is
         * destroyed (unless it's retained by someone else in the meantime).
         */
        panfrost_bo_unreference(bo);

        return bo;
}
/* Returns the polygon list's GPU address if available, or otherwise allocates
 * the polygon list. It's perfectly fast to use allocate/free BO directly,
 * since we'll hit the BO cache and this is one-per-batch anyway. */

mali_ptr
panfrost_batch_get_polygon_list(struct panfrost_batch *batch, unsigned size)
{
        if (batch->polygon_list) {
                assert(batch->polygon_list->size >= size);
        } else {
                /* Create the BO as invisible, as there's no reason to map */
                size = util_next_power_of_two(size);

                batch->polygon_list = panfrost_batch_create_bo(batch, size,
                                                               PAN_BO_INVISIBLE,
                                                               PAN_BO_ACCESS_PRIVATE |
                                                               PAN_BO_ACCESS_RW |
                                                               PAN_BO_ACCESS_VERTEX_TILER |
                                                               PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->polygon_list->gpu;
}
struct panfrost_bo *
panfrost_batch_get_scratchpad(struct panfrost_batch *batch,
                              unsigned shift,
                              unsigned thread_tls_alloc,
                              unsigned core_count)
{
        unsigned size = panfrost_get_total_stack_size(shift,
                                                      thread_tls_alloc,
                                                      core_count);

        if (batch->scratchpad) {
                assert(batch->scratchpad->size >= size);
        } else {
                batch->scratchpad = panfrost_batch_create_bo(batch, size,
                                                             PAN_BO_INVISIBLE,
                                                             PAN_BO_ACCESS_PRIVATE |
                                                             PAN_BO_ACCESS_RW |
                                                             PAN_BO_ACCESS_VERTEX_TILER |
                                                             PAN_BO_ACCESS_FRAGMENT);
        }

        return batch->scratchpad;
}
struct panfrost_bo *
panfrost_batch_get_shared_memory(struct panfrost_batch *batch,
                                 unsigned size,
                                 unsigned workgroup_count)
{
        if (batch->shared_memory) {
                assert(batch->shared_memory->size >= size);
        } else {
                batch->shared_memory = panfrost_batch_create_bo(batch, size,
                                                                PAN_BO_INVISIBLE,
                                                                PAN_BO_ACCESS_PRIVATE |
                                                                PAN_BO_ACCESS_RW |
                                                                PAN_BO_ACCESS_VERTEX_TILER);
        }

        return batch->shared_memory;
}
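/* Lazily allocate the tiler heap, a large buffer shared by the vertex/tiler
 * and fragment jobs of this batch. */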
struct panfrost_bo *
panfrost_batch_get_tiler_heap(struct panfrost_batch *batch)
{
        if (batch->tiler_heap)
                return batch->tiler_heap;

        batch->tiler_heap = panfrost_batch_create_bo(batch, 4096 * 4096,
                                                     PAN_BO_INVISIBLE |
                                                     PAN_BO_GROWABLE,
                                                     PAN_BO_ACCESS_PRIVATE |
                                                     PAN_BO_ACCESS_RW |
                                                     PAN_BO_ACCESS_VERTEX_TILER |
                                                     PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_heap);
        return batch->tiler_heap;
}
mali_ptr
panfrost_batch_get_tiler_meta(struct panfrost_batch *batch, unsigned vertex_count)
{
        if (!vertex_count)
                return 0;

        if (batch->tiler_meta)
                return batch->tiler_meta;

        struct panfrost_bo *tiler_heap;
        tiler_heap = panfrost_batch_get_tiler_heap(batch);

        struct bifrost_tiler_heap_meta tiler_heap_meta = {
                .heap_size = tiler_heap->size,
                .tiler_heap_start = tiler_heap->gpu,
                .tiler_heap_free = tiler_heap->gpu,
                .tiler_heap_end = tiler_heap->gpu + tiler_heap->size,
                .unk1 = 0x1,
                .unk7e007e = 0x7e007e,
        };

        struct bifrost_tiler_meta tiler_meta = {
                .hierarchy_mask = 0x28,
                .flags = 0x0,
                .width = MALI_POSITIVE(batch->key.width),
                .height = MALI_POSITIVE(batch->key.height),
                .tiler_heap_meta = panfrost_pool_upload(&batch->pool, &tiler_heap_meta, sizeof(tiler_heap_meta)),
        };

        batch->tiler_meta = panfrost_pool_upload(&batch->pool, &tiler_meta, sizeof(tiler_meta));
        return batch->tiler_meta;
}
struct panfrost_bo *
panfrost_batch_get_tiler_dummy(struct panfrost_batch *batch)
{
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        uint32_t create_flags = 0;

        if (batch->tiler_dummy)
                return batch->tiler_dummy;

        if (!(dev->quirks & MIDGARD_NO_HIER_TILING))
                create_flags = PAN_BO_INVISIBLE;

        batch->tiler_dummy = panfrost_batch_create_bo(batch, 4096,
                                                      create_flags,
                                                      PAN_BO_ACCESS_PRIVATE |
                                                      PAN_BO_ACCESS_RW |
                                                      PAN_BO_ACCESS_VERTEX_TILER |
                                                      PAN_BO_ACCESS_FRAGMENT);
        assert(batch->tiler_dummy);
        return batch->tiler_dummy;
}
static void
panfrost_batch_draw_wallpaper(struct panfrost_batch *batch)
{
        /* Color 0 is cleared, no need to draw the wallpaper.
         * TODO: MRT wallpapers.
         */
        if (batch->clear & PIPE_CLEAR_COLOR0)
                return;

        /* Nothing to reload? TODO: MRT wallpapers */
        if (batch->key.cbufs[0] == NULL)
                return;

        /* No draw calls, and no clear on the depth/stencil bufs.
         * Drawing the wallpaper would be useless.
         */
        if (!batch->scoreboard.tiler_dep &&
            !(batch->clear & PIPE_CLEAR_DEPTHSTENCIL))
                return;

        /* Check if the buffer has any content on it worth preserving */

        struct pipe_surface *surf = batch->key.cbufs[0];
        struct panfrost_resource *rsrc = pan_resource(surf->texture);
        unsigned level = surf->u.tex.level;

        if (!rsrc->slices[level].initialized)
                return;

        batch->ctx->wallpaper_batch = batch;

        /* Clamp the rendering area to the damage extent. The
         * KHR_partial_update() spec states that trying to render outside of
         * the damage region is "undefined behavior", so we should be safe.
         */
        unsigned damage_width = (rsrc->damage.extent.maxx - rsrc->damage.extent.minx);
        unsigned damage_height = (rsrc->damage.extent.maxy - rsrc->damage.extent.miny);

        if (damage_width && damage_height) {
                panfrost_batch_intersection_scissor(batch,
                                                    rsrc->damage.extent.minx,
                                                    rsrc->damage.extent.miny,
                                                    rsrc->damage.extent.maxx,
                                                    rsrc->damage.extent.maxy);
        }

        /* FIXME: Looks like aligning on a tile is not enough, but
         * aligning on twice the tile size seems to work. We don't
         * know exactly what happens here but this deserves extra
         * investigation to figure it out.
         */
        batch->minx = batch->minx & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->miny = batch->miny & ~((MALI_TILE_LENGTH * 2) - 1);
        batch->maxx = MIN2(ALIGN_POT(batch->maxx, MALI_TILE_LENGTH * 2),
                           rsrc->base.width0);
        batch->maxy = MIN2(ALIGN_POT(batch->maxy, MALI_TILE_LENGTH * 2),
                           rsrc->base.height0);

        struct pipe_scissor_state damage;
        struct pipe_box rects[4];

        /* Clamp the damage box to the rendering area. */
        damage.minx = MAX2(batch->minx, rsrc->damage.biggest_rect.x);
        damage.miny = MAX2(batch->miny, rsrc->damage.biggest_rect.y);
        damage.maxx = MIN2(batch->maxx,
                           rsrc->damage.biggest_rect.x +
                           rsrc->damage.biggest_rect.width);
        damage.maxx = MAX2(damage.maxx, damage.minx);
        damage.maxy = MIN2(batch->maxy,
                           rsrc->damage.biggest_rect.y +
                           rsrc->damage.biggest_rect.height);
        damage.maxy = MAX2(damage.maxy, damage.miny);

        /* One damage rectangle means we can end up with at most 4 reload
         * regions:
         * 1: left region, only exists if damage.x > 0
         * 2: right region, only exists if damage.x + damage.width < fb->width
         * 3: top region, only exists if damage.y > 0. The intersection with
         *    the left and right regions is dropped
         * 4: bottom region, only exists if damage.y + damage.height < fb->height.
         *    The intersection with the left and right regions is dropped
         *
         *                    ____________________________
         *                    |       |     3     |      |
         *                    |       |___________|      |
         *                    |       |   damage  |      |
         *                    |   1   |    rect   |   2  |
         *                    |       |___________|      |
         *                    |       |     4     |      |
         *                    |_______|___________|______|
         */
        u_box_2d(batch->minx, batch->miny, damage.minx - batch->minx,
                 batch->maxy - batch->miny, &rects[0]);
        u_box_2d(damage.maxx, batch->miny, batch->maxx - damage.maxx,
                 batch->maxy - batch->miny, &rects[1]);
        u_box_2d(damage.minx, batch->miny, damage.maxx - damage.minx,
                 damage.miny - batch->miny, &rects[2]);
        u_box_2d(damage.minx, damage.maxy, damage.maxx - damage.minx,
                 batch->maxy - damage.maxy, &rects[3]);

        for (unsigned i = 0; i < 4; i++) {
                /* Width and height are always >= 0 even if width is declared as a
                 * signed integer: u_box_2d() helper takes unsigned args and
                 * panfrost_set_damage_region() is taking care of clamping
                 * negative values.
                 */
                if (!rects[i].width || !rects[i].height)
                        continue;

                /* Blit the wallpaper in */
                panfrost_blit_wallpaper(batch->ctx, &rects[i]);
        }
        batch->ctx->wallpaper_batch = NULL;
}
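/* Record one BO in the submit ioctl's handle array and propagate its
 * read/write access flags onto the BO itself for panfrost_bo_wait(). */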
static void
panfrost_batch_record_bo(struct hash_entry *entry, unsigned *bo_handles, unsigned idx)
{
        struct panfrost_bo *bo = (struct panfrost_bo *)entry->key;
        uint32_t flags = (uintptr_t)entry->data;

        assert(bo->gem_handle > 0);
        bo_handles[idx] = bo->gem_handle;

        /* Update the BO access flags so that panfrost_bo_wait() knows
         * about all pending accesses.
         * We only keep the READ/WRITE info since this is all the BO
         * wait logic cares about.
         * We also preserve existing flags as this batch might not
         * be the first one to access the BO.
         */
        bo->gpu_access |= flags & (PAN_BO_ACCESS_RW);
}
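/* Build and issue one DRM_IOCTL_PANFROST_SUBMIT: gather the in-fences (the
 * batch's own out_sync for a fragment job that follows a vertex/tiler job, or
 * the batch dependencies otherwise), the BO handle list and the job chain
 * address, then optionally wait and decode the job for debugging. */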
static int
panfrost_batch_submit_ioctl(struct panfrost_batch *batch,
                            mali_ptr first_job_desc,
                            uint32_t reqs)
{
        struct panfrost_context *ctx = batch->ctx;
        struct pipe_context *gallium = (struct pipe_context *) ctx;
        struct panfrost_device *dev = pan_device(gallium->screen);
        struct drm_panfrost_submit submit = {0,};
        uint32_t *bo_handles, *in_syncs = NULL;
        bool is_fragment_shader;
        int ret;

        is_fragment_shader = (reqs & PANFROST_JD_REQ_FS) && batch->scoreboard.first_job;
        if (is_fragment_shader)
                submit.in_sync_count = 1;
        else
                submit.in_sync_count = util_dynarray_num_elements(&batch->dependencies,
                                                                  struct panfrost_batch_fence *);

        if (submit.in_sync_count) {
                in_syncs = calloc(submit.in_sync_count, sizeof(*in_syncs));
                assert(in_syncs);
        }

        /* The fragment job always depends on the vertex/tiler job if there's
         * one
         */
        if (is_fragment_shader) {
                in_syncs[0] = batch->out_sync->syncobj;
        } else {
                unsigned i = 0;

                util_dynarray_foreach(&batch->dependencies,
                                      struct panfrost_batch_fence *, dep)
                        in_syncs[i++] = (*dep)->syncobj;
        }

        submit.in_syncs = (uintptr_t)in_syncs;
        submit.out_sync = batch->out_sync->syncobj;
        submit.jc = first_job_desc;
        submit.requirements = reqs;

        bo_handles = calloc(batch->pool.bos->entries + batch->bos->entries, sizeof(*bo_handles));
        assert(bo_handles);

        hash_table_foreach(batch->bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        hash_table_foreach(batch->pool.bos, entry)
                panfrost_batch_record_bo(entry, bo_handles, submit.bo_handle_count++);

        submit.bo_handles = (u64) (uintptr_t) bo_handles;
        ret = drmIoctl(dev->fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
        free(bo_handles);
        free(in_syncs);

        if (ret) {
                if (dev->debug & PAN_DBG_MSGS)
                        fprintf(stderr, "Error submitting: %m\n");
                return errno;
        }

        /* Trace the job if we're doing that */
        if (dev->debug & (PAN_DBG_TRACE | PAN_DBG_SYNC)) {
                /* Wait so we can get errors reported back */
                drmSyncobjWait(dev->fd, &batch->out_sync->syncobj, 1,
                               INT64_MAX, 0, NULL);

                /* Trace gets priority over sync */
                bool minimal = !(dev->debug & PAN_DBG_TRACE);
                pandecode_jc(submit.jc, dev->quirks & IS_BIFROST, dev->gpu_id, minimal);
        }

        return 0;
}
static int
panfrost_batch_submit_jobs(struct panfrost_batch *batch)
{
        bool has_draws = batch->scoreboard.first_job;
        int ret = 0;

        if (has_draws) {
                ret = panfrost_batch_submit_ioctl(batch, batch->scoreboard.first_job, 0);
                assert(!ret);
        }

        if (batch->scoreboard.tiler_dep || batch->clear) {
                mali_ptr fragjob = panfrost_fragment_job(batch, has_draws);
                ret = panfrost_batch_submit_ioctl(batch, fragjob, PANFROST_JD_REQ_FS);
                assert(!ret);
        }

        return ret;
}
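/* Submit a batch: recursively submit its dependencies first, draw the
 * wallpaper (framebuffer reload) if needed, attach the framebuffer
 * descriptor, kick the vertex/tiler and fragment jobs, then reset the damage
 * info of the render targets and release the batch. */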
static void
panfrost_batch_submit(struct panfrost_batch *batch)
{
        assert(batch);
        struct panfrost_device *dev = pan_device(batch->ctx->base.screen);

        /* Submit the dependencies first. */
        util_dynarray_foreach(&batch->dependencies,
                              struct panfrost_batch_fence *, dep) {
                if ((*dep)->batch)
                        panfrost_batch_submit((*dep)->batch);
        }

        int ret;

        /* Nothing to do! */
        if (!batch->scoreboard.first_job && !batch->clear) {
                /* Mark the fence as signaled so the fence logic does not try
                 * to wait on it.
                 */
                batch->out_sync->signaled = true;
                goto out;
        }

        panfrost_batch_draw_wallpaper(batch);

        /* Now that all draws are in, we can finally prepare the
         * FBD for the batch */

        if (batch->framebuffer.gpu && batch->scoreboard.first_job) {
                struct panfrost_context *ctx = batch->ctx;
                struct pipe_context *gallium = (struct pipe_context *) ctx;
                struct panfrost_device *dev = pan_device(gallium->screen);

                if (dev->quirks & MIDGARD_SFBD)
                        panfrost_attach_sfbd(batch, ~0);
                else
                        panfrost_attach_mfbd(batch, ~0);
        }

        mali_ptr polygon_list = panfrost_batch_get_polygon_list(batch,
                MALI_TILER_MINIMUM_HEADER_SIZE);

        panfrost_scoreboard_initialize_tiler(&batch->pool, &batch->scoreboard, polygon_list);

        ret = panfrost_batch_submit_jobs(batch);

        if (ret && dev->debug & PAN_DBG_MSGS)
                fprintf(stderr, "panfrost_batch_submit failed: %d\n", ret);

        /* We must reset the damage info of our render targets here even
         * though a damage reset normally happens when the DRI layer swaps
         * buffers. That's because there can be implicit flushes the GL
         * app is not aware of, and those might impact the damage region: if
         * part of the damaged portion is drawn during those implicit flushes,
         * you have to reload those areas before next draws are pushed, and
         * since the driver can't easily know what's been modified by the draws
         * it flushed, the easiest solution is to reload everything.
         */
        for (unsigned i = 0; i < batch->key.nr_cbufs; i++) {
                struct panfrost_resource *res;

                if (!batch->key.cbufs[i])
                        continue;

                res = pan_resource(batch->key.cbufs[i]->texture);
                panfrost_resource_reset_damage(res);
        }

out:
        panfrost_freeze_batch(batch);
        panfrost_free_batch(batch);
}
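/* Flush every pending batch of the context, optionally waiting for all of
 * their out_sync fences to signal before returning. */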
void
panfrost_flush_all_batches(struct panfrost_context *ctx, bool wait)
{
        struct util_dynarray fences, syncobjs;

        if (wait) {
                util_dynarray_init(&fences, NULL);
                util_dynarray_init(&syncobjs, NULL);
        }

        hash_table_foreach(ctx->batches, hentry) {
                struct panfrost_batch *batch = hentry->data;

                assert(batch);

                if (wait) {
                        panfrost_batch_fence_reference(batch->out_sync);
                        util_dynarray_append(&fences, struct panfrost_batch_fence *,
                                             batch->out_sync);
                        util_dynarray_append(&syncobjs, uint32_t,
                                             batch->out_sync->syncobj);
                }

                panfrost_batch_submit(batch);
        }

        assert(!ctx->batches->entries);

        /* Collect batch fences before returning */
        panfrost_gc_fences(ctx);

        if (!wait)
                return;

        drmSyncobjWait(pan_device(ctx->base.screen)->fd,
                       util_dynarray_begin(&syncobjs),
                       util_dynarray_num_elements(&syncobjs, uint32_t),
                       INT64_MAX, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL, NULL);

        util_dynarray_foreach(&fences, struct panfrost_batch_fence *, fence)
                panfrost_batch_fence_unreference(*fence);

        util_dynarray_fini(&fences);
        util_dynarray_fini(&syncobjs);
}
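/* Report whether any not-yet-submitted batch reads or writes the given BO. */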
bool
panfrost_pending_batches_access_bo(struct panfrost_context *ctx,
                                   const struct panfrost_bo *bo)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return false;

        if (access->writer && access->writer->batch)
                return true;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        return true;
        }

        return false;
}
/* We always flush writers. We might also need to flush readers */

void
panfrost_flush_batches_accessing_bo(struct panfrost_context *ctx,
                                    struct panfrost_bo *bo,
                                    bool flush_readers)
{
        struct panfrost_bo_access *access;
        struct hash_entry *hentry;

        hentry = _mesa_hash_table_search(ctx->accessed_bos, bo);
        access = hentry ? hentry->data : NULL;
        if (!access)
                return;

        if (access->writer && access->writer->batch)
                panfrost_batch_submit(access->writer->batch);

        if (!flush_readers)
                return;

        util_dynarray_foreach(&access->readers, struct panfrost_batch_fence *,
                              reader) {
                if (*reader && (*reader)->batch)
                        panfrost_batch_submit((*reader)->batch);
        }
}
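/* Derive per-batch requirement flags (MSAA, depth write) and the implicit
 * depth/stencil draw mask from the currently bound CSO state. */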
void
panfrost_batch_set_requirements(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        if (ctx->rasterizer && ctx->rasterizer->base.multisample)
                batch->requirements |= PAN_REQ_MSAA;

        if (ctx->depth_stencil && ctx->depth_stencil->depth.writemask) {
                batch->requirements |= PAN_REQ_DEPTH_WRITE;
                batch->draws |= PIPE_CLEAR_DEPTH;
        }

        if (ctx->depth_stencil && ctx->depth_stencil->stencil[0].enabled)
                batch->draws |= PIPE_CLEAR_STENCIL;
}
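/* Grow the batch's stack size to cover the largest stack required by any
 * currently bound shader stage. */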
void
panfrost_batch_adjust_stack_size(struct panfrost_batch *batch)
{
        struct panfrost_context *ctx = batch->ctx;

        for (unsigned i = 0; i < PIPE_SHADER_TYPES; ++i) {
                struct panfrost_shader_state *ss;

                ss = panfrost_get_shader_state(ctx, i);
                if (!ss)
                        continue;

                batch->stack_size = MAX2(batch->stack_size, ss->stack_size);
        }
}
/* Helper to smear a 32-bit color across 128-bit components */

static void
pan_pack_color_32(uint32_t *packed, uint32_t v)
{
        for (unsigned i = 0; i < 4; ++i)
                packed[i] = v;
}

static void
pan_pack_color_64(uint32_t *packed, uint32_t lo, uint32_t hi)
{
        for (unsigned i = 0; i < 4; i += 2) {
                packed[i + 0] = lo;
                packed[i + 1] = hi;
        }
}
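/* Pack a clear colour into the 128-bit value the hardware expects for the
 * given framebuffer format, special-casing the common RGB(A) formats and
 * falling back to util_pack_color() plus replication for everything else. */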
static void
pan_pack_color(uint32_t *packed, const union pipe_color_union *color, enum pipe_format format)
{
        /* Alpha magicked to 1.0 if there is no alpha */

        bool has_alpha = util_format_has_alpha(format);
        float clear_alpha = has_alpha ? color->f[3] : 1.0f;

        /* Packed color depends on the framebuffer format */

        const struct util_format_description *desc =
                util_format_description(format);

        if (util_format_is_rgba8_variant(desc)) {
                pan_pack_color_32(packed,
                                  ((uint32_t) float_to_ubyte(clear_alpha) << 24) |
                                  ((uint32_t) float_to_ubyte(color->f[2]) << 16) |
                                  ((uint32_t) float_to_ubyte(color->f[1]) <<  8) |
                                  ((uint32_t) float_to_ubyte(color->f[0]) <<  0));
        } else if (format == PIPE_FORMAT_B5G6R5_UNORM) {
                /* First, we convert the components to R5, G6, B5 separately */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g6 = _mesa_roundevenf(SATURATE(color->f[1]) * 63.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);

                /* Then we pack into a sparse u32. TODO: Why these shifts? */
                pan_pack_color_32(packed, (b5 << 25) | (g6 << 14) | (r5 << 5));
        } else if (format == PIPE_FORMAT_B4G4R4A4_UNORM) {
                /* Convert to 4-bits */
                unsigned r4 = _mesa_roundevenf(SATURATE(color->f[0]) * 15.0);
                unsigned g4 = _mesa_roundevenf(SATURATE(color->f[1]) * 15.0);
                unsigned b4 = _mesa_roundevenf(SATURATE(color->f[2]) * 15.0);
                unsigned a4 = _mesa_roundevenf(SATURATE(clear_alpha) * 15.0);

                /* Pack on *byte* intervals */
                pan_pack_color_32(packed, (a4 << 28) | (b4 << 20) | (g4 << 12) | (r4 << 4));
        } else if (format == PIPE_FORMAT_B5G5R5A1_UNORM) {
                /* Scale as expected but shift oddly */
                unsigned r5 = _mesa_roundevenf(SATURATE(color->f[0]) * 31.0);
                unsigned g5 = _mesa_roundevenf(SATURATE(color->f[1]) * 31.0);
                unsigned b5 = _mesa_roundevenf(SATURATE(color->f[2]) * 31.0);
                unsigned a1 = _mesa_roundevenf(SATURATE(clear_alpha) * 1.0);

                pan_pack_color_32(packed, (a1 << 31) | (b5 << 25) | (g5 << 15) | (r5 << 5));
        } else {
                /* Otherwise, it's generic subject to replication */

                union util_color out = { 0 };
                unsigned size = util_format_get_blocksize(format);

                util_pack_color(color->f, format, &out);

                if (size == 1) {
                        unsigned b = out.ui[0];
                        unsigned s = b | (b << 8);
                        pan_pack_color_32(packed, s | (s << 16));
                } else if (size == 2)
                        pan_pack_color_32(packed, out.ui[0] | (out.ui[0] << 16));
                else if (size == 3 || size == 4)
                        pan_pack_color_32(packed, out.ui[0]);
                else if (size == 6)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1] | (out.ui[1] << 16)); /* RGB16F -- RGBB */
                else if (size == 8)
                        pan_pack_color_64(packed, out.ui[0], out.ui[1]);
                else if (size == 16)
                        memcpy(packed, out.ui, 16);
                else
                        unreachable("Unknown generic format size packing clear colour");
        }
}
void
panfrost_batch_clear(struct panfrost_batch *batch,
                     unsigned buffers,
                     const union pipe_color_union *color,
                     double depth, unsigned stencil)
{
        struct panfrost_context *ctx = batch->ctx;

        if (buffers & PIPE_CLEAR_COLOR) {
                for (unsigned i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
                        if (!(buffers & (PIPE_CLEAR_COLOR0 << i)))
                                continue;

                        enum pipe_format format = ctx->pipe_framebuffer.cbufs[i]->format;
                        pan_pack_color(batch->clear_color[i], color, format);
                }
        }

        if (buffers & PIPE_CLEAR_DEPTH) {
                batch->clear_depth = depth;
        }

        if (buffers & PIPE_CLEAR_STENCIL) {
                batch->clear_stencil = stencil;
        }

        batch->clear |= buffers;

        /* Clearing affects the entire framebuffer (by definition -- this is
         * the Gallium clear callback, which clears the whole framebuffer. If
         * the scissor test were enabled from the GL side, the gallium frontend
         * would emit a quad instead and we wouldn't go down this code path) */

        panfrost_batch_union_scissor(batch, 0, 0,
                                     ctx->pipe_framebuffer.width,
                                     ctx->pipe_framebuffer.height);
}
static bool
panfrost_batch_compare(const void *a, const void *b)
{
        return util_framebuffer_state_equal(a, b);
}

static uint32_t
panfrost_batch_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct pipe_framebuffer_state));
}
/* Given a new bounding rectangle (scissor), let the job cover the union of the
 * new and old bounding rectangles */

void
panfrost_batch_union_scissor(struct panfrost_batch *batch,
                             unsigned minx, unsigned miny,
                             unsigned maxx, unsigned maxy)
{
        batch->minx = MIN2(batch->minx, minx);
        batch->miny = MIN2(batch->miny, miny);
        batch->maxx = MAX2(batch->maxx, maxx);
        batch->maxy = MAX2(batch->maxy, maxy);
}

void
panfrost_batch_intersection_scissor(struct panfrost_batch *batch,
                                    unsigned minx, unsigned miny,
                                    unsigned maxx, unsigned maxy)
{
        batch->minx = MAX2(batch->minx, minx);
        batch->miny = MAX2(batch->miny, miny);
        batch->maxx = MIN2(batch->maxx, maxx);
        batch->maxy = MIN2(batch->maxy, maxy);
}
/* Are we currently rendering to the dev (rather than an FBO)? */

bool
panfrost_batch_is_scanout(struct panfrost_batch *batch)
{
        /* If there is no color buffer, it's an FBO */
        if (batch->key.nr_cbufs != 1)
                return false;

        /* If we're so early that no framebuffer has been set, assume scanout */
        if (!batch->key.cbufs[0])
                return true;

        return batch->key.cbufs[0]->texture->bind & PIPE_BIND_DISPLAY_TARGET ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SCANOUT ||
               batch->key.cbufs[0]->texture->bind & PIPE_BIND_SHARED;
}
void
panfrost_batch_init(struct panfrost_context *ctx)
{
        ctx->batches = _mesa_hash_table_create(ctx,
                                               panfrost_batch_hash,
                                               panfrost_batch_compare);
        ctx->accessed_bos = _mesa_hash_table_create(ctx, _mesa_hash_pointer,
                                                    _mesa_key_pointer_equal);
}